pax_global_header00006660000000000000000000000064146235156310014520gustar00rootroot0000000000000052 comment=8ca76ea1e67c9732a67cab812f8129b68671b9cd jupyter_client-8.6.2/000077500000000000000000000000001462351563100145755ustar00rootroot00000000000000jupyter_client-8.6.2/.github/000077500000000000000000000000001462351563100161355ustar00rootroot00000000000000jupyter_client-8.6.2/.github/dependabot.yml000066400000000000000000000005071462351563100207670ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" groups: actions: patterns: - "*" - package-ecosystem: "pip" directory: "/" schedule: interval: "weekly" groups: actions: patterns: - "*" jupyter_client-8.6.2/.github/workflows/000077500000000000000000000000001462351563100201725ustar00rootroot00000000000000jupyter_client-8.6.2/.github/workflows/downstream.yml000066400000000000000000000104341462351563100231020ustar00rootroot00000000000000name: Test downstream projects on: push: branches: ["main"] pull_request: concurrency: group: downstream-${{ github.ref }} cancel-in-progress: true jobs: ipykernel: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1 with: package_name: ipykernel nbclient: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1 with: package_name: nbclient env_values: IPYKERNEL_CELL_NAME=\ papermill: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1 with: package_name: papermill nbconvert: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1 with: package_name: nbconvert package_spec: pip install -e ".[test]" jupyter_server: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1 with: package_name: jupyter_server jupyter_kernel_test: runs-on: ubuntu-latest timeout-minutes: 10 steps: - name: Checkout uses: actions/checkout@v4 - name: Base Setup uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - name: Setup conda ${{ matrix.python-version }} uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true activate-environment: jupyter_kernel_test miniforge-variant: Mambaforge python-version: ${{ matrix.python-version }} - name: Test jupyter_kernel_test shell: bash -l {0} run: | git clone https://github.com/jupyter/jupyter_kernel_test.git cd jupyter_kernel_test conda env update --name jupyter_kernel_test --file environment.yml pip install -e ".[test]" python -m unittest -v qtconsole: runs-on: ubuntu-latest timeout-minutes: 20 steps: - name: Checkout uses: actions/checkout@v4 - name: Setup Python uses: actions/setup-python@v5 with: python-version: "3.9" architecture: "x64" - name: Install System Packages run: | sudo apt-get update sudo apt-get install -y --no-install-recommends '^libxcb.*-dev' libx11-xcb-dev libglu1-mesa-dev libxrender-dev libxi-dev libxkbcommon-dev libxkbcommon-x11-dev - name: Install qtconsole dependencies shell: bash -l {0} run: | cd ${GITHUB_WORKSPACE}/.. git clone https://github.com/jupyter/qtconsole.git cd qtconsole ${pythonLocation}/bin/python -m pip install -e ".[test]" ${pythonLocation}/bin/python -m pip install pyqt5 - name: Install Jupyter-Client changes shell: bash -l {0} run: ${pythonLocation}/bin/python -m pip install -e . - name: Test qtconsole shell: bash -l {0} run: | cd ${GITHUB_WORKSPACE}/../qtconsole xvfb-run --auto-servernum ${pythonLocation}/bin/python -m pytest -x -vv -s --full-trace --color=yes qtconsole downstreams_check: # This job does nothing and is only used for the branch protection if: always() needs: - ipykernel - nbclient - papermill - nbconvert - jupyter_server - jupyter_kernel_test - qtconsole runs-on: ubuntu-latest steps: - name: Decide whether the needed jobs succeeded or failed uses: re-actors/alls-green@release/v1 with: jobs: ${{ toJSON(needs) }} jupyter_client-8.6.2/.github/workflows/enforce-label.yml000066400000000000000000000005001462351563100234060ustar00rootroot00000000000000name: Enforce PR label on: pull_request: types: [labeled, unlabeled, opened, edited, synchronize] jobs: enforce-label: runs-on: ubuntu-latest permissions: pull-requests: write steps: - name: enforce-triage-label uses: jupyterlab/maintainer-tools/.github/actions/enforce-label@v1 jupyter_client-8.6.2/.github/workflows/main.yml000066400000000000000000000114721462351563100216460ustar00rootroot00000000000000name: CI on: push: branches: ["main"] pull_request: concurrency: group: ci-${{ github.ref }} cancel-in-progress: true defaults: run: shell: bash -eux {0} jobs: check_release: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyter-server/jupyter_releaser/.github/actions/check-release@v2 with: token: ${{ secrets.GITHUB_TOKEN }} check_links: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/check-links@v1 test: runs-on: ${{ matrix.os }} timeout-minutes: 20 strategy: fail-fast: false matrix: os: [ubuntu-latest, windows-latest, macos-latest] python-version: ["3.8", "3.12"] include: - os: windows-latest python-version: "3.11" - os: ubuntu-latest python-version: "pypy-3.9" - os: ubuntu-latest python-version: "3.10" - os: macos-latest python-version: "3.9" steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - name: Run the tests if: ${{ !startsWith(matrix.python-version, 'pypy') && !startsWith(matrix.os, 'windows') }} run: | hatch run cov:test --cov-fail-under 75 || hatch run test:test --lf - name: Run the tests on pypy if: ${{ startsWith(matrix.python-version, 'pypy') }} run: | hatch run test:nowarn || hatch run test:nowarn --lf - name: Run the tests on windows if: ${{ startsWith(matrix.os, 'windows') }} run: | hatch run cov:nowarn || hatch run test:nowarn --lf - uses: jupyterlab/maintainer-tools/.github/actions/upload-coverage@v1 coverage: runs-on: ubuntu-latest needs: - test steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/report-coverage@v1 with: fail_under: 78 docs: runs-on: windows-latest steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - name: Build API docs run: | hatch run docs:api # If this fails run `hatch run docs:api` locally # and commit. git status --porcelain git status -s | grep "A" && exit 1 git status -s | grep "M" && exit 1 echo "API docs done" - run: hatch run docs:build lint: name: Test Lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - name: Run Linters run: | hatch run typing:test hatch run lint:build pipx run interrogate -v . pipx run doc8 --max-line-length=200 --ignore-path=docs/source/other/full-config.rst test_minimum_verisons: name: Test Minimum Versions runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 with: dependency_type: minimum - name: Run the unit tests run: | hatch -vv run test:nowarn || hatch run test:nowarn --lf test_prereleases: name: Test Prereleases timeout-minutes: 10 runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 with: dependency_type: pre - name: Run the tests run: | hatch run test:nowarn || hatch run test:nowarn --lf make_sdist: name: Make SDist runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/make-sdist@v1 test_sdist: runs-on: ubuntu-latest needs: [make_sdist] name: Install from SDist and Test timeout-minutes: 20 steps: - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/test-sdist@v1 with: test_command: pytest -vv || pytest -vv --lf tests_check: # This job does nothing and is only used for the branch protection if: always() needs: - coverage - docs - lint - check_links - test_minimum_verisons - test_prereleases - test_sdist runs-on: ubuntu-latest steps: - name: Decide whether the needed jobs succeeded or failed uses: re-actors/alls-green@release/v1 with: jobs: ${{ toJSON(needs) }} jupyter_client-8.6.2/.github/workflows/prep-release.yml000066400000000000000000000032311462351563100233000ustar00rootroot00000000000000name: "Step 1: Prep Release" on: workflow_dispatch: inputs: version_spec: description: "New Version Specifier" default: "next" required: false branch: description: "The branch to target" required: false post_version_spec: description: "Post Version Specifier" required: false silent: description: "Set a placeholder in the changelog and don't publish the release." required: false type: boolean since: description: "Use PRs with activity since this date or git reference" required: false since_last_stable: description: "Use PRs with activity since the last stable git tag" required: false type: boolean jobs: prep_release: runs-on: ubuntu-latest permissions: contents: write steps: - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - name: Prep Release id: prep-release uses: jupyter-server/jupyter_releaser/.github/actions/prep-release@v2 with: token: ${{ secrets.GITHUB_TOKEN }} version_spec: ${{ github.event.inputs.version_spec }} silent: ${{ github.event.inputs.silent }} post_version_spec: ${{ github.event.inputs.post_version_spec }} target: ${{ github.event.inputs.target }} branch: ${{ github.event.inputs.branch }} since: ${{ github.event.inputs.since }} since_last_stable: ${{ github.event.inputs.since_last_stable }} - name: "** Next Step **" run: | echo "Optional): Review Draft Release: ${{ steps.prep-release.outputs.release_url }}" jupyter_client-8.6.2/.github/workflows/publish-changelog.yml000066400000000000000000000016401462351563100243110ustar00rootroot00000000000000name: "Publish Changelog" on: release: types: [published] workflow_dispatch: inputs: branch: description: "The branch to target" required: false jobs: publish_changelog: runs-on: ubuntu-latest environment: release steps: - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: actions/create-github-app-token@v1 id: app-token with: app-id: ${{ vars.APP_ID }} private-key: ${{ secrets.APP_PRIVATE_KEY }} - name: Publish changelog id: publish-changelog uses: jupyter-server/jupyter_releaser/.github/actions/publish-changelog@v2 with: token: ${{ steps.app-token.outputs.token }} branch: ${{ github.event.inputs.branch }} - name: "** Next Step **" run: | echo "Merge the changelog update PR: ${{ steps.publish-changelog.outputs.pr_url }}" jupyter_client-8.6.2/.github/workflows/publish-release.yml000066400000000000000000000034061462351563100240040ustar00rootroot00000000000000name: "Step 2: Publish Release" on: workflow_dispatch: inputs: branch: description: "The target branch" required: false release_url: description: "The URL of the draft GitHub release" required: false steps_to_skip: description: "Comma separated list of steps to skip" required: false jobs: publish_release: runs-on: ubuntu-latest environment: release permissions: id-token: write steps: - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: actions/create-github-app-token@v1 id: app-token with: app-id: ${{ vars.APP_ID }} private-key: ${{ secrets.APP_PRIVATE_KEY }} - name: Populate Release id: populate-release uses: jupyter-server/jupyter_releaser/.github/actions/populate-release@v2 with: token: ${{ steps.app-token.outputs.token }} branch: ${{ github.event.inputs.branch }} release_url: ${{ github.event.inputs.release_url }} steps_to_skip: ${{ github.event.inputs.steps_to_skip }} - name: Finalize Release id: finalize-release uses: jupyter-server/jupyter_releaser/.github/actions/finalize-release@v2 with: token: ${{ steps.app-token.outputs.token }} release_url: ${{ steps.populate-release.outputs.release_url }} - name: "** Next Step **" if: ${{ success() }} run: | echo "Verify the final release" echo ${{ steps.finalize-release.outputs.release_url }} - name: "** Failure Message **" if: ${{ failure() }} run: | echo "Failed to Publish the Draft Release Url:" echo ${{ steps.populate-release.outputs.release_url }} jupyter_client-8.6.2/.gitignore000066400000000000000000000005471462351563100165730ustar00rootroot00000000000000MANIFEST build dist *.py[cod] __pycache__ *.egg-info *~ *.bak .ipynb_checkpoints .tox .DS_Store \#*# .#* .coverage .cache absolute.json htmlcov/ docs/changelog.md .mypy_cache Pipfile Pipfle.lock # Sphinx documentation _build docs/_build/ docs/gh-pages # PyBuilder target/ # PyCharm .idea/ *.iml # Failing tests sometimes leave these behind: kernel-*.json jupyter_client-8.6.2/.mailmap000066400000000000000000000250661462351563100162270ustar00rootroot00000000000000A. J. Holyoake ajholyoake Aaron Culich Aaron Culich Aron Ahmadia ahmadia Benjamin Ragan-Kelley Benjamin Ragan-Kelley Min RK Benjamin Ragan-Kelley MinRK Barry Wark Barry Wark Ben Edwards Ben Edwards Bradley M. Froehle Bradley M. Froehle Bradley M. Froehle Bradley Froehle Brandon Parsons Brandon Parsons Brian E. Granger Brian Granger Brian E. Granger Brian Granger <> Brian E. Granger bgranger <> Brian E. Granger bgranger Christoph Gohlke cgohlke Cyrille Rossant rossant Damián Avila damianavila Damián Avila damianavila Damon Allen damontallen Darren Dale darren.dale <> Darren Dale Darren Dale <> Dav Clark Dav Clark <> Dav Clark Dav Clark David Hirschfeld dhirschfeld David P. Sanders David P. Sanders David Warde-Farley David Warde-Farley <> Doug Blank Doug Blank Eugene Van den Bulke Eugene Van den Bulke Evan Patterson Evan Patterson Evan Patterson Evan Patterson Evan Patterson epatters Evan Patterson epatters Ernie French Ernie French Ernie French ernie french Ernie French ernop Fernando Perez Fernando Perez Fernando Perez Fernando Perez fperez <> Fernando Perez fptest <> Fernando Perez fptest1 <> Fernando Perez Fernando Perez Fernando Perez Fernando Perez <> Fernando Perez Fernando Perez Frank Murphy Frank Murphy Gabriel Becker gmbecker Gael Varoquaux gael.varoquaux <> Gael Varoquaux gvaroquaux Gael Varoquaux Gael Varoquaux <> Ingolf Becker watercrossing Jake Vanderplas Jake Vanderplas Jakob Gager jakobgager Jakob Gager jakobgager Jakob Gager jakobgager Jason Grout Jason Grout Jason Gors jason gors Jason Gors jgors Jens Hedegaard Nielsen Jens Hedegaard Nielsen Jens Hedegaard Nielsen Jens H Nielsen Jens Hedegaard Nielsen Jens H. Nielsen Jez Ng Jez Ng Jonathan Frederic Jonathan Frederic Jonathan Frederic Jonathan Frederic Jonathan Frederic Jonathan Frederic Jonathan Frederic jon Jonathan Frederic U-Jon-PC\Jon Jonathan March Jonathan March Jonathan March jdmarch Jörgen Stenarson Jörgen Stenarson Jörgen Stenarson Jorgen Stenarson Jörgen Stenarson Jorgen Stenarson <> Jörgen Stenarson jstenar Jörgen Stenarson jstenar <> Jörgen Stenarson Jörgen Stenarson Juergen Hasch juhasch Juergen Hasch juhasch Julia Evans Julia Evans Kester Tong KesterTong Kyle Kelley Kyle Kelley Kyle Kelley rgbkrk Laurent Dufréchou Laurent Dufréchou Laurent Dufréchou laurent dufrechou <> Laurent Dufréchou laurent.dufrechou <> Laurent Dufréchou Laurent Dufrechou <> Laurent Dufréchou laurent.dufrechou@gmail.com <> Laurent Dufréchou ldufrechou Lorena Pantano Lorena Luis Pedro Coelho Luis Pedro Coelho Marc Molla marcmolla Martín Gaitán Martín Gaitán Matthias Bussonnier Matthias BUSSONNIER Matthias Bussonnier Bussonnier Matthias Matthias Bussonnier Matthias BUSSONNIER Matthias Bussonnier Matthias Bussonnier Michael Droettboom Michael Droettboom Nicholas Bollweg Nicholas Bollweg (Nick) Nicolas Rougier Nikolay Koldunov Nikolay Koldunov Omar Andrés Zapata Mesa Omar Andres Zapata Mesa Omar Andrés Zapata Mesa Omar Andres Zapata Mesa Pankaj Pandey Pankaj Pandey Pascal Schetelat pascal-schetelat Paul Ivanov Paul Ivanov Pauli Virtanen Pauli Virtanen <> Pauli Virtanen Pauli Virtanen Pierre Gerold Pierre Gerold Pietro Berkes Pietro Berkes Piti Ongmongkolkul piti118 Prabhu Ramachandran Prabhu Ramachandran <> Puneeth Chaganti Puneeth Chaganti Robert Kern rkern <> Robert Kern Robert Kern Robert Kern Robert Kern Robert Kern Robert Kern <> Robert Marchman Robert Marchman Satrajit Ghosh Satrajit Ghosh Satrajit Ghosh Satrajit Ghosh Scott Sanderson Scott Sanderson smithj1 smithj1 smithj1 smithj1 Steven Johnson stevenJohnson Steven Silvester blink1073 S. Weber s8weber Stefan van der Walt Stefan van der Walt Silvia Vinyes Silvia Silvia Vinyes silviav12 Sylvain Corlay Sylvain Corlay sylvain.corlay Ted Drain TD22057 Théophile Studer Théophile Studer Thomas Kluyver Thomas Thomas Spura Thomas Spura Timo Paulssen timo vds vds2212 vds vds Ville M. Vainio Ville M. Vainio ville Ville M. Vainio ville Ville M. Vainio vivainio <> Ville M. Vainio Ville M. Vainio Ville M. Vainio Ville M. Vainio Walter Doerwald walter.doerwald <> Walter Doerwald Walter Doerwald <> W. Trevor King W. Trevor King Yoval P. y-p jupyter_client-8.6.2/.pre-commit-config.yaml000066400000000000000000000041601462351563100210570ustar00rootroot00000000000000ci: autoupdate_schedule: monthly autoupdate_commit_msg: "chore: update pre-commit hooks" repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - id: check-case-conflict - id: check-ast - id: check-docstring-first - id: check-executables-have-shebangs - id: check-added-large-files - id: check-case-conflict - id: check-merge-conflict - id: check-json - id: check-toml - id: check-yaml - id: debug-statements - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/python-jsonschema/check-jsonschema rev: 0.27.4 hooks: - id: check-github-workflows - repo: https://github.com/executablebooks/mdformat rev: 0.7.17 hooks: - id: mdformat - repo: https://github.com/pre-commit/mirrors-prettier rev: "v4.0.0-alpha.8" hooks: - id: prettier types_or: [yaml, html, json] - repo: https://github.com/pre-commit/mirrors-mypy rev: "v1.8.0" hooks: - id: mypy files: jupyter_client stages: [manual] args: ["--install-types", "--non-interactive"] additional_dependencies: ["traitlets>=5.13", "ipykernel>=6.26", "jupyter_core>=5.3.2"] - repo: https://github.com/adamchainz/blacken-docs rev: "1.16.0" hooks: - id: blacken-docs additional_dependencies: [black==23.7.0] - repo: https://github.com/codespell-project/codespell rev: "v2.2.6" hooks: - id: codespell args: ["-L", "sur,nd"] - repo: https://github.com/pre-commit/pygrep-hooks rev: "v1.10.0" hooks: - id: rst-backticks - id: rst-directive-colons - id: rst-inline-touching-normal - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.2.0 hooks: - id: ruff types_or: [python, jupyter] args: ["--fix", "--show-fixes"] - id: ruff-format types_or: [python, jupyter] - repo: https://github.com/scientific-python/cookie rev: "2024.01.24" hooks: - id: sp-repo-review additional_dependencies: ["repo-review[cli]"] jupyter_client-8.6.2/.readthedocs.yaml000066400000000000000000000003061462351563100200230ustar00rootroot00000000000000version: 2 sphinx: configuration: docs/conf.py python: install: # install jupyter-client itself - method: pip path: ".[docs]" build: os: ubuntu-22.04 tools: python: "3.11" jupyter_client-8.6.2/CHANGELOG.md000066400000000000000000002516171462351563100164220ustar00rootroot00000000000000# Changes in Jupyter Client {#changelog} ## 8.6.2 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.6.1...474093f4237744dee2af1c8929e2afe5354ef356)) ### Bugs fixed - Use non-blocking zmq Poller [#1023](https://github.com/jupyter/jupyter_client/pull/1023) ([@fcollonval](https://github.com/fcollonval)) ### Documentation improvements - use https url [#1021](https://github.com/jupyter/jupyter_client/pull/1021) ([@Carreau](https://github.com/Carreau)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2024-03-12&to=2024-05-23&type=c)) [@Carreau](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ACarreau+updated%3A2024-03-12..2024-05-23&type=Issues) | [@fcollonval](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Afcollonval+updated%3A2024-03-12..2024-05-23&type=Issues) ## 8.6.1 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.6.0...8a3327ae58247be734d51f44c629354f0f600660)) ### Maintenance and upkeep improvements - Update Release Scripts [#1016](https://github.com/jupyter/jupyter_client/pull/1016) ([@blink1073](https://github.com/blink1073)) - chore: update pre-commit hooks [#1008](https://github.com/jupyter/jupyter_client/pull/1008) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - chore: update pre-commit hooks [#1002](https://github.com/jupyter/jupyter_client/pull/1002) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Bump actions/setup-python from 4 to 5 [#1000](https://github.com/jupyter/jupyter_client/pull/1000) ([@dependabot](https://github.com/dependabot)) - chore: update pre-commit hooks [#999](https://github.com/jupyter/jupyter_client/pull/999) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Bump conda-incubator/setup-miniconda from 2 to 3 [#998](https://github.com/jupyter/jupyter_client/pull/998) ([@dependabot](https://github.com/dependabot)) - chore: update pre-commit hooks [#996](https://github.com/jupyter/jupyter_client/pull/996) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Documentation improvements - Fix docs reference [#1017](https://github.com/jupyter/jupyter_client/pull/1017) ([@blink1073](https://github.com/blink1073)) - Add docs about packaging kernels [#1013](https://github.com/jupyter/jupyter_client/pull/1013) ([@blink1073](https://github.com/blink1073)) - Clarify what a restart means [#966](https://github.com/jupyter/jupyter_client/pull/966) ([@mlucool](https://github.com/mlucool)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-11-06&to=2024-03-12&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-11-06..2024-03-12&type=Issues) | [@dependabot](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adependabot+updated%3A2023-11-06..2024-03-12&type=Issues) | [@mlucool](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Amlucool+updated%3A2023-11-06..2024-03-12&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-11-06..2024-03-12&type=Issues) | [@Zsailer](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AZsailer+updated%3A2023-11-06..2024-03-12&type=Issues) ## 8.6.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.5.0...2d6f52bdf5266688c51f3270cd7e47bbd17c708c)) ### Bugs fixed - Fix possibly not defined tracker [#991](https://github.com/jupyter/jupyter_client/pull/991) ([@davidbrochart](https://github.com/davidbrochart)) - BUG: Fix Kwarg only in update_env [#989](https://github.com/jupyter/jupyter_client/pull/989) ([@Carreau](https://github.com/Carreau)) ### Maintenance and upkeep improvements - Update typing for traitlets 5.13 [#995](https://github.com/jupyter/jupyter_client/pull/995) ([@blink1073](https://github.com/blink1073)) - Use ruff format [#992](https://github.com/jupyter/jupyter_client/pull/992) ([@blink1073](https://github.com/blink1073)) - Unused `*args` in `KernelManager`'s `__init__` [#986](https://github.com/jupyter/jupyter_client/pull/986) ([@Carreau](https://github.com/Carreau)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-10-25&to=2023-11-06&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-10-25..2023-11-06&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ACarreau+updated%3A2023-10-25..2023-11-06&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2023-10-25..2023-11-06&type=Issues) ## 8.5.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.4.0...ff94e310c2af3546f2f9384e6b35fd11c3d09a71)) ### Enhancements made - Allow to update kernels env in between restart. [#987](https://github.com/jupyter/jupyter_client/pull/987) ([@Carreau](https://github.com/Carreau)) ### Maintenance and upkeep improvements - Enable strict typing [#984](https://github.com/jupyter/jupyter_client/pull/984) ([@blink1073](https://github.com/blink1073)) - Update typings for mypy 1.6 [#983](https://github.com/jupyter/jupyter_client/pull/983) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-10-11&to=2023-10-25&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-10-11..2023-10-25&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ACarreau+updated%3A2023-10-11..2023-10-25&type=Issues) ## 8.4.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.3.1...74044393230e70134f68e664f2ef19fab92b7774)) ### Maintenance and upkeep improvements - Test on python 3.12 [#978](https://github.com/jupyter/jupyter_client/pull/978) ([@blink1073](https://github.com/blink1073)) - Update typing for traitlets 5.11 [#977](https://github.com/jupyter/jupyter_client/pull/977) ([@blink1073](https://github.com/blink1073)) - chore: update pre-commit hooks [#975](https://github.com/jupyter/jupyter_client/pull/975) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Update typings for traitlets 5.10.1 [#974](https://github.com/jupyter/jupyter_client/pull/974) ([@blink1073](https://github.com/blink1073)) - Do not use datetime.utcnow() that is deprecated in Python 3.12 [#972](https://github.com/jupyter/jupyter_client/pull/972) ([@ellert](https://github.com/ellert)) - Use sp-repo-review [#971](https://github.com/jupyter/jupyter_client/pull/971) ([@blink1073](https://github.com/blink1073)) - Bump actions/checkout from 3 to 4 [#968](https://github.com/jupyter/jupyter_client/pull/968) ([@dependabot](https://github.com/dependabot)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-08-29&to=2023-10-11&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-08-29..2023-10-11&type=Issues) | [@dependabot](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adependabot+updated%3A2023-08-29..2023-10-11&type=Issues) | [@ellert](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aellert+updated%3A2023-08-29..2023-10-11&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-08-29..2023-10-11&type=Issues) ## 8.3.1 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.3.0...b4f7d947fae55a4fe59a27df0830a9a78dcd4e12)) ### Enhancements made - Support external kernels [#961](https://github.com/jupyter/jupyter_client/pull/961) ([@davidbrochart](https://github.com/davidbrochart)) ### Bugs fixed - Make cache_ports configurable with default value of False. [#956](https://github.com/jupyter/jupyter_client/pull/956) ([@jkitchin](https://github.com/jkitchin)) ### Maintenance and upkeep improvements ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-06-23&to=2023-08-29&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-06-23..2023-08-29&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2023-06-23..2023-08-29&type=Issues) | [@jkitchin](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ajkitchin+updated%3A2023-06-23..2023-08-29&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2023-06-23..2023-08-29&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-06-23..2023-08-29&type=Issues) | [@tmaxwell-anthropic](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Atmaxwell-anthropic+updated%3A2023-06-23..2023-08-29&type=Issues) ## 8.3.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.2.0...bddb8854a4aa3324e128e0497539e17246fbf630)) ### Enhancements made - Allow kwargs when writing connection_file [#953](https://github.com/jupyter/jupyter_client/pull/953) ([@fecet](https://github.com/fecet)) ### Maintenance and upkeep improvements ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-04-13&to=2023-06-23&type=c)) [@fecet](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Afecet+updated%3A2023-04-13..2023-06-23&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-04-13..2023-06-23&type=Issues) ## 8.2.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.1.0...dbf6b81fa5ab606eaedc5e8d0843debce18e8746)) ### Enhancements made - use c.f.Future to wait across threads [#940](https://github.com/jupyter/jupyter_client/pull/940) ([@minrk](https://github.com/minrk)) ### Maintenance and upkeep improvements - Use local coverage [#945](https://github.com/jupyter/jupyter_client/pull/945) ([@blink1073](https://github.com/blink1073)) - Add more project URLs [#944](https://github.com/jupyter/jupyter_client/pull/944) ([@fcollonval](https://github.com/fcollonval)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-03-20&to=2023-04-13&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-03-20..2023-04-13&type=Issues) | [@fcollonval](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Afcollonval+updated%3A2023-03-20..2023-04-13&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2023-03-20..2023-04-13&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-03-20..2023-04-13&type=Issues) ## 8.1.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.3...e3ac7a69355dd1af66038eda767e51e92ef034fb)) ### Bugs fixed - ThreadedZMQStream: close stream before socket [#936](https://github.com/jupyter/jupyter_client/pull/936) ([@minrk](https://github.com/minrk)) ### Maintenance and upkeep improvements ### Documentation improvements - Adds spec for the copyToGlobals request [#932](https://github.com/jupyter/jupyter_client/pull/932) ([@brichet](https://github.com/brichet)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-02-16&to=2023-03-20&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-02-16..2023-03-20&type=Issues) | [@brichet](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Abrichet+updated%3A2023-02-16..2023-03-20&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2023-02-16..2023-03-20&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-02-16..2023-03-20&type=Issues) ## 8.0.3 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.2...dc0eaba1f609079672ec739fcd977dc44431da92)) ### Bugs fixed - Fix kernelspec print output [#933](https://github.com/jupyter/jupyter_client/pull/933) ([@minrk](https://github.com/minrk)) - Don't emit a trailng newline in base64-encoded data like 'image/png' [#931](https://github.com/jupyter/jupyter_client/pull/931) ([@xl0](https://github.com/xl0)) ### Maintenance and upkeep improvements - Add license [#934](https://github.com/jupyter/jupyter_client/pull/934) ([@dcsaba89](https://github.com/dcsaba89)) - Improving jsonutil tests [#929](https://github.com/jupyter/jupyter_client/pull/929) ([@andrehora](https://github.com/andrehora)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-01-30&to=2023-02-16&type=c)) [@andrehora](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aandrehora+updated%3A2023-01-30..2023-02-16&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-01-30..2023-02-16&type=Issues) | [@dcsaba89](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adcsaba89+updated%3A2023-01-30..2023-02-16&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2023-01-30..2023-02-16&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2023-01-30..2023-02-16&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2023-01-30..2023-02-16&type=Issues) | [@xl0](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Axl0+updated%3A2023-01-30..2023-02-16&type=Issues) ## 8.0.2 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.1...717d36edcd9ce595f727d8b5a27e270c2a6e2c46)) ### Bugs fixed - Add papermill downstream check and fix kernel client replies [#925](https://github.com/jupyter/jupyter_client/pull/925) ([@blink1073](https://github.com/blink1073)) ### Maintenance and upkeep improvements - Adopt more ruff rules [#924](https://github.com/jupyter/jupyter_client/pull/924) ([@blink1073](https://github.com/blink1073)) - Prefer print in kernelspecapp [#923](https://github.com/jupyter/jupyter_client/pull/923) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-01-26&to=2023-01-30&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-01-26..2023-01-30&type=Issues) ## 8.0.1 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0...dc6113c360e05122430b8e130374e9f4e4b701d7)) ### Bugs fixed - Fix json_output in kernelspec app [#921](https://github.com/jupyter/jupyter_client/pull/921) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2023-01-26&to=2023-01-26&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2023-01-26..2023-01-26&type=Issues) ## 8.0.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.3.5...760a7835d8b20a9daea3737759b1751d5e55dad8)) This release is primarily focused on improving `asyncio` support, while aiming to have minimal API changes. ### Enhancements made - Remove nest-asyncio dependency [#835](https://github.com/jupyter/jupyter_client/pull/835) ([@blink1073](https://github.com/blink1073)) ### Bugs fixed - Allow interrupt during restart of pending kernels [#898](https://github.com/jupyter/jupyter_client/pull/898) ([@blink1073](https://github.com/blink1073)) - Fix connection reconciliation to handle restarts [#882](https://github.com/jupyter/jupyter_client/pull/882) ([@kevin-bates](https://github.com/kevin-bates)) - Reconcile connection information [#879](https://github.com/jupyter/jupyter_client/pull/879) ([@kevin-bates](https://github.com/kevin-bates)) - Workaround for launch bug [#861](https://github.com/jupyter/jupyter_client/pull/861) ([@blink1073](https://github.com/blink1073)) - Defer creation of ready future [#858](https://github.com/jupyter/jupyter_client/pull/858) ([@blink1073](https://github.com/blink1073)) - Fix handling of initial ready promise [#854](https://github.com/jupyter/jupyter_client/pull/854) ([@blink1073](https://github.com/blink1073)) - Revert "Fix pending kernels again" [#853](https://github.com/jupyter/jupyter_client/pull/853) ([@blink1073](https://github.com/blink1073)) - Fix pending kernels again [#845](https://github.com/jupyter/jupyter_client/pull/845) ([@blink1073](https://github.com/blink1073)) - Use pytest_asyncio fixture [#826](https://github.com/jupyter/jupyter_client/pull/826) ([@davidbrochart](https://github.com/davidbrochart)) ### Maintenance and upkeep improvements - MAINT: Don't format log in log call. [#919](https://github.com/jupyter/jupyter_client/pull/919) ([@Carreau](https://github.com/Carreau)) - Remove deprecated zmq imports [#915](https://github.com/jupyter/jupyter_client/pull/915) ([@blink1073](https://github.com/blink1073)) - MAINT: consistently use relative imports. [#912](https://github.com/jupyter/jupyter_client/pull/912) ([@Carreau](https://github.com/Carreau)) - Sync lint deps [#911](https://github.com/jupyter/jupyter_client/pull/911) ([@blink1073](https://github.com/blink1073)) - MAINT: Proper typing and cast [#906](https://github.com/jupyter/jupyter_client/pull/906) ([@Carreau](https://github.com/Carreau)) - MAINT: \[_async_\]start_kernel should only take kwarg only. [#905](https://github.com/jupyter/jupyter_client/pull/905) ([@Carreau](https://github.com/Carreau)) - Add more ci checks [#903](https://github.com/jupyter/jupyter_client/pull/903) ([@blink1073](https://github.com/blink1073)) - Allow releasing from repo [#899](https://github.com/jupyter/jupyter_client/pull/899) ([@blink1073](https://github.com/blink1073)) - Fix jupyter_core pinning [#896](https://github.com/jupyter/jupyter_client/pull/896) ([@ophie200](https://github.com/ophie200)) - Adopt ruff and reduce pre-commit usage [#895](https://github.com/jupyter/jupyter_client/pull/895) ([@blink1073](https://github.com/blink1073)) - Use pytest-jupyter [#891](https://github.com/jupyter/jupyter_client/pull/891) ([@blink1073](https://github.com/blink1073)) - Import ensure_async and run_sync from jupyter_core [#889](https://github.com/jupyter/jupyter_client/pull/889) ([@davidbrochart](https://github.com/davidbrochart)) - Use base setup dependency type [#888](https://github.com/jupyter/jupyter_client/pull/888) ([@blink1073](https://github.com/blink1073)) - More CI Cleanup [#886](https://github.com/jupyter/jupyter_client/pull/886) ([@blink1073](https://github.com/blink1073)) - More coverage [#885](https://github.com/jupyter/jupyter_client/pull/885) ([@blink1073](https://github.com/blink1073)) - Clean up workflow and pyproject [#884](https://github.com/jupyter/jupyter_client/pull/884) ([@blink1073](https://github.com/blink1073)) - Add more coverage [#877](https://github.com/jupyter/jupyter_client/pull/877) ([@blink1073](https://github.com/blink1073)) - Add coverage config [#876](https://github.com/jupyter/jupyter_client/pull/876) ([@blink1073](https://github.com/blink1073)) - Bump actions/setup-python from 2 to 4 [#874](https://github.com/jupyter/jupyter_client/pull/874) ([@dependabot](https://github.com/dependabot)) - Bump actions/checkout from 2 to 3 [#873](https://github.com/jupyter/jupyter_client/pull/873) ([@dependabot](https://github.com/dependabot)) - Use platform dirs in tests [#872](https://github.com/jupyter/jupyter_client/pull/872) ([@blink1073](https://github.com/blink1073)) - Clean up types and remove use of entrypoints [#871](https://github.com/jupyter/jupyter_client/pull/871) ([@blink1073](https://github.com/blink1073)) - Add dependabot [#870](https://github.com/jupyter/jupyter_client/pull/870) ([@blink1073](https://github.com/blink1073)) - Support Python 3.8-3.11 [#866](https://github.com/jupyter/jupyter_client/pull/866) ([@blink1073](https://github.com/blink1073)) - Fix assertion in `TestSession.test_serialize` [#860](https://github.com/jupyter/jupyter_client/pull/860) ([@samrat](https://github.com/samrat)) - Maintenance cleanup [#856](https://github.com/jupyter/jupyter_client/pull/856) ([@blink1073](https://github.com/blink1073)) - Ignore warnings in prereleases test [#844](https://github.com/jupyter/jupyter_client/pull/844) ([@blink1073](https://github.com/blink1073)) - Use hatch for version [#837](https://github.com/jupyter/jupyter_client/pull/837) ([@blink1073](https://github.com/blink1073)) - Move tests to top level [#834](https://github.com/jupyter/jupyter_client/pull/834) ([@blink1073](https://github.com/blink1073)) - Fix nbconvert downstream test [#827](https://github.com/jupyter/jupyter_client/pull/827) ([@blink1073](https://github.com/blink1073)) ### Documentation improvements - Reflect current protocol version in documentation [#918](https://github.com/jupyter/jupyter_client/pull/918) ([@SylvainCorlay](https://github.com/SylvainCorlay)) - Add full api docs [#908](https://github.com/jupyter/jupyter_client/pull/908) ([@blink1073](https://github.com/blink1073)) - Add more ci checks [#903](https://github.com/jupyter/jupyter_client/pull/903) ([@blink1073](https://github.com/blink1073)) - Switch to pydata sphinx theme [#840](https://github.com/jupyter/jupyter_client/pull/840) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-08-25&to=2023-01-26&type=c)) [@arogozhnikov](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aarogozhnikov+updated%3A2022-08-25..2023-01-26&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-08-25..2023-01-26&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ACarreau+updated%3A2022-08-25..2023-01-26&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Accordoba12+updated%3A2022-08-25..2023-01-26&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2022-08-25..2023-01-26&type=Issues) | [@dependabot](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adependabot+updated%3A2022-08-25..2023-01-26&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2022-08-25..2023-01-26&type=Issues) | [@meeseeksdev](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ameeseeksdev+updated%3A2022-08-25..2023-01-26&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2022-08-25..2023-01-26&type=Issues) | [@ophie200](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aophie200+updated%3A2022-08-25..2023-01-26&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-08-25..2023-01-26&type=Issues) | [@samrat](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Asamrat+updated%3A2022-08-25..2023-01-26&type=Issues) | [@SylvainCorlay](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ASylvainCorlay+updated%3A2022-08-25..2023-01-26&type=Issues) | [@Zsailer](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AZsailer+updated%3A2022-08-25..2023-01-26&type=Issues) ## 8.0.0rc0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0b3...bf637ed9543198d6dca96d748b0307ed01b16c94)) ### Maintenance and upkeep improvements - Allow releasing from repo [#899](https://github.com/jupyter/jupyter_client/pull/899) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-12-13&to=2022-12-19&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-12-13..2022-12-19&type=Issues) ## 8.0.0b3 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0b2...b51a3b5d1a0d1a8ad390c1121506217909da1c4f)) ### Bugs fixed - Allow interrupt during restart of pending kernels [#898](https://github.com/jupyter/jupyter_client/pull/898) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-12-08&to=2022-12-13&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-12-08..2022-12-13&type=Issues) ## 8.0.0b2 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0b1...ff33adf784f2bd25814d7ed6ae7c6651cee8376e)) ### Maintenance and upkeep improvements - Fix jupyter_core pinning [#896](https://github.com/jupyter/jupyter_client/pull/896) ([@ophie200](https://github.com/ophie200)) - Adopt ruff and reduce pre-commit usage [#895](https://github.com/jupyter/jupyter_client/pull/895) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-12-05&to=2022-12-08&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-12-05..2022-12-08&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2022-12-05..2022-12-08&type=Issues) | [@ophie200](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aophie200+updated%3A2022-12-05..2022-12-08&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-12-05..2022-12-08&type=Issues) ## 8.0.0b1 No merged PRs ## 8.0.0b0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0a4...e419ff4a651b6ac7cd533023c2dd3bd391de6eb6)) ### Maintenance and upkeep improvements - Use pytest-jupyter [#891](https://github.com/jupyter/jupyter_client/pull/891) ([@blink1073](https://github.com/blink1073)) - Import ensure_async and run_sync from jupyter_core [#889](https://github.com/jupyter/jupyter_client/pull/889) ([@davidbrochart](https://github.com/davidbrochart)) - Use base setup dependency type [#888](https://github.com/jupyter/jupyter_client/pull/888) ([@blink1073](https://github.com/blink1073)) - More CI Cleanup [#886](https://github.com/jupyter/jupyter_client/pull/886) ([@blink1073](https://github.com/blink1073)) - More coverage [#885](https://github.com/jupyter/jupyter_client/pull/885) ([@blink1073](https://github.com/blink1073)) - Clean up workflow and pyproject [#884](https://github.com/jupyter/jupyter_client/pull/884) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-11-16&to=2022-11-29&type=c)) [@arogozhnikov](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aarogozhnikov+updated%3A2022-11-16..2022-11-29&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-11-16..2022-11-29&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2022-11-16..2022-11-29&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2022-11-16..2022-11-29&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-11-16..2022-11-29&type=Issues) ## 8.0.0a4 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0a3...107ccdd06c9b67fc081204ae7c0e7123a17cb0c4)) ### Bugs fixed - Fix connection reconciliation to handle restarts [#882](https://github.com/jupyter/jupyter_client/pull/882) ([@kevin-bates](https://github.com/kevin-bates)) ### Maintenance and upkeep improvements - Add more coverage [#877](https://github.com/jupyter/jupyter_client/pull/877) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-11-15&to=2022-11-16&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-11-15..2022-11-16&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2022-11-15..2022-11-16&type=Issues) ## 8.0.0a3 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0a2...10f69c9b5ac55b92b651f1f55fa2814f81f3ce51)) ### Bugs fixed - Reconcile connection information [#879](https://github.com/jupyter/jupyter_client/pull/879) ([@kevin-bates](https://github.com/kevin-bates)) ### Maintenance and upkeep improvements - Add coverage config [#876](https://github.com/jupyter/jupyter_client/pull/876) ([@blink1073](https://github.com/blink1073)) - Bump actions/setup-python from 2 to 4 [#874](https://github.com/jupyter/jupyter_client/pull/874) ([@dependabot](https://github.com/dependabot)) - Bump actions/checkout from 2 to 3 [#873](https://github.com/jupyter/jupyter_client/pull/873) ([@dependabot](https://github.com/dependabot)) - Clean up types and remove use of entrypoints [#871](https://github.com/jupyter/jupyter_client/pull/871) ([@blink1073](https://github.com/blink1073)) - Add dependabot [#870](https://github.com/jupyter/jupyter_client/pull/870) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-11-09&to=2022-11-15&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-11-09..2022-11-15&type=Issues) | [@dependabot](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adependabot+updated%3A2022-11-09..2022-11-15&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2022-11-09..2022-11-15&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-11-09..2022-11-15&type=Issues) ## 8.0.0a2 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0a1...268a3c5c892e3e42e76c5ec120a74de10fb04218)) ### Maintenance and upkeep improvements - Use platform dirs in tests [#872](https://github.com/jupyter/jupyter_client/pull/872) ([@blink1073](https://github.com/blink1073)) - Support Python 3.8-3.11 [#866](https://github.com/jupyter/jupyter_client/pull/866) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-10-25&to=2022-11-09&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-10-25..2022-11-09&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-10-25..2022-11-09&type=Issues) ## 8.0.0a1 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v8.0.0a0...35b66d8a738b6c9629ffead308c9f981bee1148f)) ### Bugs fixed - Workaround for launch bug [#861](https://github.com/jupyter/jupyter_client/pull/861) ([@blink1073](https://github.com/blink1073)) - Defer creation of ready future [#858](https://github.com/jupyter/jupyter_client/pull/858) ([@blink1073](https://github.com/blink1073)) ### Maintenance and upkeep improvements - Fix assertion in `TestSession.test_serialize` [#860](https://github.com/jupyter/jupyter_client/pull/860) ([@samrat](https://github.com/samrat)) - Maintenance cleanup [#856](https://github.com/jupyter/jupyter_client/pull/856) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-10-12&to=2022-10-25&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-10-12..2022-10-25&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-10-12..2022-10-25&type=Issues) | [@samrat](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Asamrat+updated%3A2022-10-12..2022-10-25&type=Issues) ## 8.0.0a0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.3.5...06e9cb3fb29a895a3a14e6e39ab524a13bec85ec)) ### Enhancements made - Remove nest-asyncio dependency [#835](https://github.com/jupyter/jupyter_client/pull/835) ([@blink1073](https://github.com/blink1073)) ### Bugs fixed - Fix handling of initial ready promise [#854](https://github.com/jupyter/jupyter_client/pull/854) ([@blink1073](https://github.com/blink1073)) - Use pytest_asyncio fixture [#826](https://github.com/jupyter/jupyter_client/pull/826) ([@davidbrochart](https://github.com/davidbrochart)) ### Maintenance and upkeep improvements - Ignore warnings in prereleases test [#844](https://github.com/jupyter/jupyter_client/pull/844) ([@blink1073](https://github.com/blink1073)) - Use hatch for version [#837](https://github.com/jupyter/jupyter_client/pull/837) ([@blink1073](https://github.com/blink1073)) - Move tests to top level [#834](https://github.com/jupyter/jupyter_client/pull/834) ([@blink1073](https://github.com/blink1073)) - Fix nbconvert downstream test [#827](https://github.com/jupyter/jupyter_client/pull/827) ([@blink1073](https://github.com/blink1073)) ### Documentation improvements - Switch to pydata sphinx theme [#840](https://github.com/jupyter/jupyter_client/pull/840) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-08-25&to=2022-10-12&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-08-25..2022-10-12&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Accordoba12+updated%3A2022-08-25..2022-10-12&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2022-08-25..2022-10-12&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2022-08-25..2022-10-12&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-08-25..2022-10-12&type=Issues) | [@Zsailer](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AZsailer+updated%3A2022-08-25..2022-10-12&type=Issues) ## 7.3.5 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.3.4...bc5ded5439ca55bd6740885eb3a44ca6bc3e2243)) ### Enhancements made - add `AsyncKernelClient` to `doc/api/client.rst` [#819](https://github.com/jupyter/jupyter_client/pull/819) ([@helioz11](https://github.com/helioz11)) ### Bugs fixed - Use tornado 6.2's PeriodicCallback in restarter [#822](https://github.com/jupyter/jupyter_client/pull/822) ([@vidartf](https://github.com/vidartf)) - Make \_stdin_hook_default async [#814](https://github.com/jupyter/jupyter_client/pull/814) ([@davidbrochart](https://github.com/davidbrochart)) ### Maintenance and upkeep improvements - \[pre-commit.ci\] pre-commit autoupdate [#824](https://github.com/jupyter/jupyter_client/pull/824) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#821](https://github.com/jupyter/jupyter_client/pull/821) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#820](https://github.com/jupyter/jupyter_client/pull/820) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#818](https://github.com/jupyter/jupyter_client/pull/818) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#816](https://github.com/jupyter/jupyter_client/pull/816) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#815](https://github.com/jupyter/jupyter_client/pull/815) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#812](https://github.com/jupyter/jupyter_client/pull/812) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#810](https://github.com/jupyter/jupyter_client/pull/810) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#809](https://github.com/jupyter/jupyter_client/pull/809) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#807](https://github.com/jupyter/jupyter_client/pull/807) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-06-08&to=2022-08-25&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-06-08..2022-08-25&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2022-06-08..2022-08-25&type=Issues) | [@helioz11](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ahelioz11+updated%3A2022-06-08..2022-08-25&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-06-08..2022-08-25&type=Issues) | [@vidartf](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Avidartf+updated%3A2022-06-08..2022-08-25&type=Issues) ## 7.3.4 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.3.3...ca4cb2d6a4b95a6925de85a47b323d2235032c74)) ### Bugs fixed - Revert latest changes to `ThreadedZMQSocketChannel` because they break Qtconsole [#803](https://github.com/jupyter/jupyter_client/pull/803) ([@ccordoba12](https://github.com/ccordoba12)) ### Maintenance and upkeep improvements - Fix sphinx 5.0 support [#804](https://github.com/jupyter/jupyter_client/pull/804) ([@blink1073](https://github.com/blink1073)) - \[pre-commit.ci\] pre-commit autoupdate [#799](https://github.com/jupyter/jupyter_client/pull/799) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-06-07&to=2022-06-08&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-06-07..2022-06-08&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Accordoba12+updated%3A2022-06-07..2022-06-08&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-06-07..2022-06-08&type=Issues) ## 7.3.3 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.3.2...37ca37d865db260e7da6fa85339be450d6fd3c3c)) ### Bugs fixed - Add local-provisioner entry point to pyproject.toml Fixes #800 [#801](https://github.com/jupyter/jupyter_client/pull/801) ([@utkonos](https://github.com/utkonos)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-06-06&to=2022-06-07&type=c)) [@utkonos](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Autkonos+updated%3A2022-06-06..2022-06-07&type=Issues) ## 7.3.2 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.3.1...c81771416d9e09e0e92be799f3e8549d0db57e43)) ### Enhancements made - Correct `Any` type annotations. [#791](https://github.com/jupyter/jupyter_client/pull/791) ([@joouha](https://github.com/joouha)) ### Maintenance and upkeep improvements - \[pre-commit.ci\] pre-commit autoupdate [#792](https://github.com/jupyter/jupyter_client/pull/792) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Use hatch backend [#789](https://github.com/jupyter/jupyter_client/pull/789) ([@blink1073](https://github.com/blink1073)) - \[pre-commit.ci\] pre-commit autoupdate [#788](https://github.com/jupyter/jupyter_client/pull/788) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Use flit build backend [#781](https://github.com/jupyter/jupyter_client/pull/781) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-05-08&to=2022-06-06&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-05-08..2022-06-06&type=Issues) | [@joouha](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ajoouha+updated%3A2022-05-08..2022-06-06&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-05-08..2022-06-06&type=Issues) ## 7.3.1 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.3.0...4df8a48071649d3488a880e61293efd26b7aff1d)) ### Bugs fixed - Check that channels exist before asking if they are alive [#785](https://github.com/jupyter/jupyter_client/pull/785) ([@ccordoba12](https://github.com/ccordoba12)) - Unicode error correction using Error Handler [#779](https://github.com/jupyter/jupyter_client/pull/779) ([@hxawax](https://github.com/hxawax)) ### Maintenance and upkeep improvements - Allow bot PRs to be automatically labeled [#784](https://github.com/jupyter/jupyter_client/pull/784) ([@blink1073](https://github.com/blink1073)) - \[pre-commit.ci\] pre-commit autoupdate [#783](https://github.com/jupyter/jupyter_client/pull/783) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-04-25&to=2022-05-08&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-04-25..2022-05-08&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Accordoba12+updated%3A2022-04-25..2022-05-08&type=Issues) | [@hxawax](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ahxawax+updated%3A2022-04-25..2022-05-08&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-04-25..2022-05-08&type=Issues) ## 7.3.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.2.2...fa597d9cdcdc277abda2c3cab4aeee1593d3a9e2)) ### Bugs fixed - Fix shutdown and cleanup behavior [#772](https://github.com/jupyter/jupyter_client/pull/772) ([@blink1073](https://github.com/blink1073)) ### Maintenance and upkeep improvements - \[pre-commit.ci\] pre-commit autoupdate [#773](https://github.com/jupyter/jupyter_client/pull/773) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - \[pre-commit.ci\] pre-commit autoupdate [#770](https://github.com/jupyter/jupyter_client/pull/770) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Improve mypy config [#769](https://github.com/jupyter/jupyter_client/pull/769) ([@blink1073](https://github.com/blink1073)) - Clean up pre-commit [#768](https://github.com/jupyter/jupyter_client/pull/768) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-04-07&to=2022-04-25&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-04-07..2022-04-25&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-04-07..2022-04-25&type=Issues) ## 7.2.2 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.2.1...01b2095d96c81c56edf8f5df44e12e476b2bcd87)) ### Maintenance and upkeep improvements - Include py.typed file [#766](https://github.com/jupyter/jupyter_client/pull/766) ([@blink1073](https://github.com/blink1073)) - \[pre-commit.ci\] pre-commit autoupdate [#765](https://github.com/jupyter/jupyter_client/pull/765) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - More Cleanup [#764](https://github.com/jupyter/jupyter_client/pull/764) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-03-30&to=2022-04-07&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-03-30..2022-04-07&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2022-03-30..2022-04-07&type=Issues) | [@echarles](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aecharles+updated%3A2022-03-30..2022-04-07&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Apre-commit-ci+updated%3A2022-03-30..2022-04-07&type=Issues) ## 7.2.1 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.2.0...683e8dd96ecd52da48a85f67e4ae31d85f1c6616)) ### Maintenance and upkeep improvements - Handle Warnings [#760](https://github.com/jupyter/jupyter_client/pull/760) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-03-29&to=2022-03-30&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-03-29..2022-03-30&type=Issues) ## 7.2.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.1.2...098de3e51bd4ee1b1a3aded889e8f109ac5eff89)) ### Enhancements made - Update consoleapp.py [#733](https://github.com/jupyter/jupyter_client/pull/733) ([@you-n-g](https://github.com/you-n-g)) ### Bugs fixed - Json packer: handle TypeError and fallback to old json_clean [#752](https://github.com/jupyter/jupyter_client/pull/752) ([@martinRenou](https://github.com/martinRenou)) - Prefer sending signals to kernel process group [#743](https://github.com/jupyter/jupyter_client/pull/743) ([@kevin-bates](https://github.com/kevin-bates)) ### Maintenance and upkeep improvements - Mock is not needed [#758](https://github.com/jupyter/jupyter_client/pull/758) ([@hroncok](https://github.com/hroncok)) - Add pytest opts and clean up workflows [#757](https://github.com/jupyter/jupyter_client/pull/757) ([@blink1073](https://github.com/blink1073)) - Clean up dependency handling [#750](https://github.com/jupyter/jupyter_client/pull/750) ([@blink1073](https://github.com/blink1073)) - Use built in run cancellation [#742](https://github.com/jupyter/jupyter_client/pull/742) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-01-21&to=2022-03-28&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2022-01-21..2022-03-28&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2022-01-21..2022-03-28&type=Issues) | [@echarles](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aecharles+updated%3A2022-01-21..2022-03-28&type=Issues) | [@hroncok](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ahroncok+updated%3A2022-01-21..2022-03-28&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2022-01-21..2022-03-28&type=Issues) | [@martinRenou](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AmartinRenou+updated%3A2022-01-21..2022-03-28&type=Issues) | [@you-n-g](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ayou-n-g+updated%3A2022-01-21..2022-03-28&type=Issues) ## 7.1.2 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.1.1...2c9fbf499f63d4287851021b8f8efc9d3c0e336e)) ### Bugs fixed - Await `kernel.ready` in `_async_shutdown_kernel` [#740](https://github.com/jupyter/jupyter_client/pull/740) ([@jtpio](https://github.com/jtpio)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2022-01-14&to=2022-01-21&type=c)) [@jtpio](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ajtpio+updated%3A2022-01-14..2022-01-21&type=Issues) | [@Zsailer](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AZsailer+updated%3A2022-01-14..2022-01-21&type=Issues) ## 7.1.1 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.1.0...4428715b65741ddccac9305d318d4ace08fa711a)) ### Enhancements made - Further improvements to pending kernels management [#732](https://github.com/jupyter/jupyter_client/pull/732) ([@Zsailer](https://github.com/Zsailer)) ### Maintenance and upkeep improvements - Test against all kernels in jupyter kernel test and clean up CI [#731](https://github.com/jupyter/jupyter_client/pull/731) ([@blink1073](https://github.com/blink1073)) - Replace master with main [#729](https://github.com/jupyter/jupyter_client/pull/729) ([@davidbrochart](https://github.com/davidbrochart)) ### Documentation improvements - \[DOC\] improve kernel provisioner doc [#730](https://github.com/jupyter/jupyter_client/pull/730) ([@abzymeinsjtu](https://github.com/abzymeinsjtu)) - add changelog for message spec [#525](https://github.com/jupyter/jupyter_client/pull/525) ([@minrk](https://github.com/minrk)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-11-22&to=2022-01-14&type=c)) [@abzymeinsjtu](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aabzymeinsjtu+updated%3A2021-11-22..2022-01-14&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2021-11-22..2022-01-14&type=Issues) | [@BoPeng](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ABoPeng+updated%3A2021-11-22..2022-01-14&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2021-11-22..2022-01-14&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2021-11-22..2022-01-14&type=Issues) | [@rgbkrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Argbkrk+updated%3A2021-11-22..2022-01-14&type=Issues) | [@willingc](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Awillingc+updated%3A2021-11-22..2022-01-14&type=Issues) | [@Zsailer](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AZsailer+updated%3A2021-11-22..2022-01-14&type=Issues) ## 7.1.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.0.6...6b76603578fd3a76fd577d3319393c9933f53ab0)) ### Enhancements made - Add support for pending kernels [#712](https://github.com/jupyter/jupyter_client/pull/712) ([@blink1073](https://github.com/blink1073)) ### Bugs fixed - Improve restarter logic [#717](https://github.com/jupyter/jupyter_client/pull/717) ([@vidartf](https://github.com/vidartf)) - Set sticky bit only on the directory [#711](https://github.com/jupyter/jupyter_client/pull/711) ([@ci4ic4](https://github.com/ci4ic4)) ### Maintenance and upkeep improvements - Enforce labels on PRs [#720](https://github.com/jupyter/jupyter_client/pull/720) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-10-01&to=2021-11-22&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2021-10-01..2021-11-22&type=Issues) | [@ci4ic4](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aci4ic4+updated%3A2021-10-01..2021-11-22&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2021-10-01..2021-11-22&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2021-10-01..2021-11-22&type=Issues) | [@vidartf](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Avidartf+updated%3A2021-10-01..2021-11-22&type=Issues) ## 7.0.6 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.0.5...58b11df0ecb729effacc59ce28e9f431fa9c6a4d)) ### Bugs fixed - Fallback to the old ipykernel "json_clean" if we are not able to serialize a JSON message [#708](https://github.com/jupyter/jupyter_client/pull/708) ([@martinRenou](https://github.com/martinRenou)) ### Other merged PRs - Add test for serializing bytes [#707](https://github.com/jupyter/jupyter_client/pull/707) ([@martinRenou](https://github.com/martinRenou)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-09-29&to=2021-10-01&type=c)) [@martinRenou](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AmartinRenou+updated%3A2021-09-29..2021-10-01&type=Issues) ## 7.0.5 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.0.4...e379bf91fea63526b9c4cc6679e7953a325b540c)) ### Bugs fixed - avoid use of deprecated zmq.utils.jsonapi [#703](https://github.com/jupyter/jupyter_client/pull/703) ([@minrk](https://github.com/minrk)) ### Maintenance and upkeep improvements - Use logger.warning instead of deprecated warn method [#700](https://github.com/jupyter/jupyter_client/pull/700) ([@kevin-bates](https://github.com/kevin-bates)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-09-28&to=2021-09-29&type=c)) [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2021-09-28..2021-09-29&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2021-09-28..2021-09-29&type=Issues) ## 7.0.4 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.0.3...5b570152c0d88dd5e0ff1711c75fc9987ef76256)) ### Bugs fixed - Fix json_default so that it's closer to what ipykernel had before [#698](https://github.com/jupyter/jupyter_client/pull/698) ([@martinRenou](https://github.com/martinRenou)) - Clean up the pending task [#697](https://github.com/jupyter/jupyter_client/pull/697) ([@shingo78](https://github.com/shingo78)) - fix kernel can only restart once issue [#695](https://github.com/jupyter/jupyter_client/pull/695) ([@mofanke](https://github.com/mofanke)) - Prevent failure if kernel is not found when shutting it down [#694](https://github.com/jupyter/jupyter_client/pull/694) ([@martinRenou](https://github.com/martinRenou)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-09-16&to=2021-09-28&type=c)) [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2021-09-16..2021-09-28&type=Issues) | [@martinRenou](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AmartinRenou+updated%3A2021-09-16..2021-09-28&type=Issues) | [@mofanke](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Amofanke+updated%3A2021-09-16..2021-09-28&type=Issues) | [@shingo78](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ashingo78+updated%3A2021-09-16..2021-09-28&type=Issues) ## 7.0.3 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.0.2...e2e854c445d697ae1c188171ea0731532b6ac0d9)) ### Bugs fixed - Address missing `local-provisioner` scenario [#692](https://github.com/jupyter/jupyter_client/pull/692) ([@kevin-bates](https://github.com/kevin-bates)) - use `load_connection_info(info)` when constructing a blocking client [#688](https://github.com/jupyter/jupyter_client/pull/688) ([@minrk](https://github.com/minrk)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-08-30&to=2021-09-16&type=c)) [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2021-08-30..2021-09-16&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2021-08-30..2021-09-16&type=Issues) ## 7.0.2 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.0.1...b2a23d8f8b4f24a2bc908b6d95047242f4da87cd)) ### Bugs fixed - Don't set event loop policy on Windows at import time [#686](https://github.com/jupyter/jupyter_client/pull/686) ([@minrk](https://github.com/minrk)) ### Documentation improvements - Improve migration guide [#685](https://github.com/jupyter/jupyter_client/pull/685) ([@davidbrochart](https://github.com/davidbrochart)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-08-20&to=2021-08-30&type=c)) [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2021-08-20..2021-08-30&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2021-08-20..2021-08-30&type=Issues) | [@dhirschfeld](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adhirschfeld+updated%3A2021-08-20..2021-08-30&type=Issues) | [@jankatins](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ajankatins+updated%3A2021-08-20..2021-08-30&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2021-08-20..2021-08-30&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2021-08-20..2021-08-30&type=Issues) | [@takluyver](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Atakluyver+updated%3A2021-08-20..2021-08-30&type=Issues) | [@yuvipanda](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ayuvipanda+updated%3A2021-08-20..2021-08-30&type=Issues) ## 7.0.1 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/v7.0.0...0ce9f293ea574d61cae438469df5e53298713b63)) ### Merged PRs - Use formal method names when called internally [#683](https://github.com/jupyter/jupyter_client/pull/683) ([@kevin-bates](https://github.com/kevin-bates)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-08-19&to=2021-08-20&type=c)) [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2021-08-19..2021-08-20&type=Issues) ## 7.0.0 ([Full Changelog](https://github.com/jupyter/jupyter_client/compare/26a16c0c91e245f7403aa27a812fee5e905d2964...31750bc87baf88377bcc6967e227b650b38fa872)) ### Summary The 7.0 release brings a major feature in [Kernel Provisioners](https://github.com/jupyter/jupyter_client/blob/master/docs/provisioning.rst), which enable the ability for third parties to manage the lifecycle of a kernel's runtime environment. Being a major release, there are some backward incompatible changes. Please see the [migration guide](https://jupyter-client.readthedocs.io/en/latest/migration.html) for further details. ### Enhancements made - Kernel Provisioning - initial implementation [#612](https://github.com/jupyter/jupyter_client/pull/612) ([@kevin-bates](https://github.com/kevin-bates)) ### Bugs fixed - Fix up some async method aliases in KernelManager [#670](https://github.com/jupyter/jupyter_client/pull/670) ([@kevin-bates](https://github.com/kevin-bates)) - Support `answer_yes` when removing kernel specs [#659](https://github.com/jupyter/jupyter_client/pull/659) ([@davidbrochart](https://github.com/davidbrochart)) - Include process ID in message ID [#655](https://github.com/jupyter/jupyter_client/pull/655) ([@takluyver](https://github.com/takluyver)) - Fix qtconsole issues [#638](https://github.com/jupyter/jupyter_client/pull/638) ([@davidbrochart](https://github.com/davidbrochart)) ### Maintenance and upkeep improvements - Added debugger key in `kernel_info_reply` [#486](https://github.com/jupyter/jupyter_client/pull/486) ([@JohanMabille](https://github.com/JohanMabille)) - Prepare for use with Jupyter Releaser [#676](https://github.com/jupyter/jupyter_client/pull/676) ([@afshin](https://github.com/afshin)) - Force install `jupyter_client` master [#675](https://github.com/jupyter/jupyter_client/pull/675) ([@davidbrochart](https://github.com/davidbrochart)) - Fix project name [#674](https://github.com/jupyter/jupyter_client/pull/674) ([@vidartf](https://github.com/vidartf)) - Rename trait to `allowed_kernelspecs` [#672](https://github.com/jupyter/jupyter_client/pull/672) ([@blink1073](https://github.com/blink1073)) - Remove block parameter from `get_msg()` [#671](https://github.com/jupyter/jupyter_client/pull/671) ([@davidbrochart](https://github.com/davidbrochart)) - Only import `nest_asyncio` locally [#665](https://github.com/jupyter/jupyter_client/pull/665) ([@SylvainCorlay](https://github.com/SylvainCorlay)) - Use a default serializer that is not only for date types [#664](https://github.com/jupyter/jupyter_client/pull/664) ([@martinRenou](https://github.com/martinRenou)) - Updated `debug_info_response` [#657](https://github.com/jupyter/jupyter_client/pull/657) ([@JohanMabille](https://github.com/JohanMabille)) - Do not block on exit [#651](https://github.com/jupyter/jupyter_client/pull/651) ([@impact27](https://github.com/impact27)) - Update test kernel with native coroutine, remove `async_generator` dependency [#646](https://github.com/jupyter/jupyter_client/pull/646) ([@kevin-bates](https://github.com/kevin-bates)) - `setup.py` and CI improvements [#645](https://github.com/jupyter/jupyter_client/pull/645) ([@dolfinus](https://github.com/dolfinus)) - Test downstream projects [#644](https://github.com/jupyter/jupyter_client/pull/644) ([@davidbrochart](https://github.com/davidbrochart)) - Remove deprecations in kernel manager [#643](https://github.com/jupyter/jupyter_client/pull/643) ([@kevin-bates](https://github.com/kevin-bates)) - Add `block=True` back to `get_msg()` [#641](https://github.com/jupyter/jupyter_client/pull/641) ([@davidbrochart](https://github.com/davidbrochart)) - Pin `python>=3.6.1` [#636](https://github.com/jupyter/jupyter_client/pull/636) ([@davidbrochart](https://github.com/davidbrochart)) - Use `pre-commit` [#631](https://github.com/jupyter/jupyter_client/pull/631) ([@davidbrochart](https://github.com/davidbrochart)) - Attempt CI with `ipykernel` 6.0 prerelease [#629](https://github.com/jupyter/jupyter_client/pull/629) ([@SylvainCorlay](https://github.com/SylvainCorlay)) - Make `KernelManager` subclass tests DRY [#628](https://github.com/jupyter/jupyter_client/pull/628) ([@davidbrochart](https://github.com/davidbrochart)) - Add tests to ensure MultiKernelManager subclass methods are called [#627](https://github.com/jupyter/jupyter_client/pull/627) ([@kevin-bates](https://github.com/kevin-bates)) - Add type annotations, refactor sync/async [#623](https://github.com/jupyter/jupyter_client/pull/623) ([@davidbrochart](https://github.com/davidbrochart)) ### Documentation improvements - Create migration guide [#681](https://github.com/jupyter/jupyter_client/pull/681) ([@blink1073](https://github.com/blink1073)) - Update changelog for 7.0.0rc0 [#673](https://github.com/jupyter/jupyter_client/pull/673) ([@blink1073](https://github.com/blink1073)) - Added documentation for `richInspectVariables` request [#654](https://github.com/jupyter/jupyter_client/pull/654) ([@JohanMabille](https://github.com/JohanMabille)) - Change to `edit_magic` payload [#652](https://github.com/jupyter/jupyter_client/pull/652) ([@yitzchak](https://github.com/yitzchak)) - Added missing documentation for the inspectVariables request and resp… [#649](https://github.com/jupyter/jupyter_client/pull/649) ([@JohanMabille](https://github.com/JohanMabille)) - Add status field to other replies in documentation [#648](https://github.com/jupyter/jupyter_client/pull/648) ([@yitzchak](https://github.com/yitzchak)) ### Contributors to this release ([GitHub contributors page for this release](https://github.com/jupyter/jupyter_client/graphs/contributors?from=2021-03-14&to=2021-08-16&type=c)) [@afshin](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aafshin+updated%3A2021-03-14..2021-08-16&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ablink1073+updated%3A2021-03-14..2021-08-16&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ACarreau+updated%3A2021-03-14..2021-08-16&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Accordoba12+updated%3A2021-03-14..2021-08-16&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adavidbrochart+updated%3A2021-03-14..2021-08-16&type=Issues) | [@dhirschfeld](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adhirschfeld+updated%3A2021-03-14..2021-08-16&type=Issues) | [@dolfinus](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Adolfinus+updated%3A2021-03-14..2021-08-16&type=Issues) | [@echarles](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aecharles+updated%3A2021-03-14..2021-08-16&type=Issues) | [@impact27](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aimpact27+updated%3A2021-03-14..2021-08-16&type=Issues) | [@JohanMabille](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AJohanMabille+updated%3A2021-03-14..2021-08-16&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Akevin-bates+updated%3A2021-03-14..2021-08-16&type=Issues) | [@martinRenou](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AmartinRenou+updated%3A2021-03-14..2021-08-16&type=Issues) | [@mattip](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Amattip+updated%3A2021-03-14..2021-08-16&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Aminrk+updated%3A2021-03-14..2021-08-16&type=Issues) | [@MSeal](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3AMSeal+updated%3A2021-03-14..2021-08-16&type=Issues) | [@SylvainCorlay](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3ASylvainCorlay+updated%3A2021-03-14..2021-08-16&type=Issues) | [@takluyver](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Atakluyver+updated%3A2021-03-14..2021-08-16&type=Issues) | [@vidartf](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Avidartf+updated%3A2021-03-14..2021-08-16&type=Issues) | [@yitzchak](https://github.com/search?q=repo%3Ajupyter%2Fjupyter_client+involves%3Ayitzchak+updated%3A2021-03-14..2021-08-16&type=Issues) ## 6.2.0 - Yanked (PyPI) and marked as broken (conda) ## 6.1.13 - Yanked (PyPI) and marked as broken (conda) ## 6.1.12 - Shutdown request sequence has been modified to be more graceful, it now is preceded by interrupt, and will also send a `SIGTERM` before forcibly killing the kernel. [#620](https://github.com/jupyter/jupyter_client/pull/620) - Removal of `ipython_genutils` as a dependency. It was implicit before; but required by at least traitlets thus avoiding issues. We are working on completely removing it from all jupyter dependencies; as it might lead to issues packaging for Python 3.10, and was mostly used for compatibility with python 2. ([#620](https://github.com/jupyter/jupyter_client/pull/620), [#605](https://github.com/jupyter/jupyter_client/pull/605)) - Address a race condition between `shutdown_kernel` and restarter. ([#607](https://github.com/jupyter/jupyter_client/pull/607).) See the [full list of pull-requests](https://github.com/jupyter/jupyter_client/milestone/27?closed=1) ## 6.1.11 - Move jedi pinning to test requirements [#599](https://github.com/jupyter/jupyter_client/pull/599) ## 6.1.10 - Add change parameter needed for observer method of kernel_spec_manager trait [#598](https://github.com/jupyter/jupyter_client/pull/598) ## 6.1.9 - Pin jedi\<=0.17.2 [#596](https://github.com/jupyter/jupyter_client/pull/596) ## 6.1.8 - Doc updates ([#563](https://github.com/jupyter/jupyter_client/pull/563), [#564](https://github.com/jupyter/jupyter_client/pull/564), [#587](https://github.com/jupyter/jupyter_client/pull/587)) - Fix path to the connection file [#568](https://github.com/jupyter/jupyter_client/pull/568) - Code cleanup ([#574](https://github.com/jupyter/jupyter_client/pull/574), [#579](https://github.com/jupyter/jupyter_client/pull/579)) - Silence kill_kernel when no process is present [#576](https://github.com/jupyter/jupyter_client/pull/576) - Remove extra_env and corresponding test [#581](https://github.com/jupyter/jupyter_client/pull/581) - Add documentation dependencies to setup.py [#582](https://github.com/jupyter/jupyter_client/pull/582) - Fix for Windows localhost IP addresses [#584](https://github.com/jupyter/jupyter_client/pull/584) - Drop Travis CI, add GitHub Actions [#586](https://github.com/jupyter/jupyter_client/pull/586) - Adapt KernelManager.\_kernel_spec_manager_changed to observe [#588](https://github.com/jupyter/jupyter_client/pull/588) - Allow use ~/ in the kernel's command or its arguments [#589](https://github.com/jupyter/jupyter_client/pull/589) - Change wait_for_ready logic [#592](https://github.com/jupyter/jupyter_client/pull/592) - Fix test_session with msgpack v1 [#594](https://github.com/jupyter/jupyter_client/pull/594) ## 6.1.6 - Removed warnings in more cases for KernelManagers that use new cleanup method [#560](https://github.com/jupyter/jupyter_client/pull/560) - Some improved tests with a conversion to pytest pattern [#561](https://github.com/jupyter/jupyter_client/pull/561) ## 6.1.5 - Gracefully Close ZMQ Context upon kernel shutdown to fix memory leak [#548](https://github.com/jupyter/jupyter_client/pull/548) - Fix for chained exceptions to preserve stacks ([#552](https://github.com/jupyter/jupyter_client/pull/552), [#554](https://github.com/jupyter/jupyter_client/pull/554)) - Fix start_kernel error when passing kernel_id [#547](https://github.com/jupyter/jupyter_client/pull/547) - Update to releasing docs [#543](https://github.com/jupyter/jupyter_client/pull/543) ## 6.1.4 (Deleted release with incorrect local files) ## 6.1.3 - Add AsyncKernelClient client_class to AsyncKernelManager [#542](https://github.com/jupyter/jupyter_client/pull/542) - Doc fix for xeus hyperlinks [#540](https://github.com/jupyter/jupyter_client/pull/540) - Doc typo fix [#539](https://github.com/jupyter/jupyter_client/pull/539) ## 6.1.2 - Fixed a bug causing clients to sometimes hang after a stop call was made [#536](https://github.com/jupyter/jupyter_client/pull/536) ## 6.1.1 - Subprocess kill action fix for async execution [#535](https://github.com/jupyter/jupyter_client/pull/535) - Doc fix for xeus kernel list [#534](https://github.com/jupyter/jupyter_client/pull/534) ## 6.1.0 This release includes support for asyncio patterns! Downstream tools should soon have releases to additionally support async patterns. - AsyncKernelManager and AsyncMultiKernelManager are now available for async jupyter_client interactions ([#528](https://github.com/jupyter/jupyter_client/pull/528), [#529](https://github.com/jupyter/jupyter_client/pull/529)) - Removed unused sphinx dependency ([#518](https://github.com/jupyter/jupyter_client/pull/518), [#518](https://github.com/jupyter/jupyter_client/pull/518)). - Added install instructions for pip to documentation [#521](https://github.com/jupyter/jupyter_client/pull/521) - Improved docs around version protocol and messaging ([#522](https://github.com/jupyter/jupyter_client/pull/522), [#526](https://github.com/jupyter/jupyter_client/pull/526)) ## 6.0.0 The git history had to be reworked heavily in merging 5.x and master, so a link to all the changes at once in github had been left out as it's just confusing. An exciting change in this release is some async support (huge thanks to @davidbrochart for doing most of the work)! See linked PR below for more details, we're working on integrating this into nbclient as well in the near future. New Features: - Added async API [#506](https://github.com/jupyter/jupyter_client/pull/506) Changes: - Python 3.8 testing and support added [#509](https://github.com/jupyter/jupyter_client/pull/509) - Session.msg_id optimization [#493](https://github.com/jupyter/jupyter_client/pull/493) - Only cache ports if the cache_ports flag is set to True [#492](https://github.com/jupyter/jupyter_client/pull/492) - Removed direct dependency on pywin32 as this is now in jupyter core [#489](https://github.com/jupyter/jupyter_client/pull/489) Fixes: - Prevent two kernels to have the same ports [#490](https://github.com/jupyter/jupyter_client/pull/490) Docs: - Document the handling of error in do_execute [#500](https://github.com/jupyter/jupyter_client/pull/500) Breaking changes: - Dropped support for Python 2.7! ## 5.3.5 - Backported memory leak fix [#548](https://github.com/jupyter/jupyter_client/pull/548) [#555](https://github.com/jupyter/jupyter_client/pull/555). ## 5.3.4 - Changed secure_write to be imported from jupyter_core with fix for extended usernames in Windows [#483](https://github.com/jupyter/jupyter_client/pull/483). ## 5.3.3 - Fixed issue with non-english windows permissions [#478](https://github.com/jupyter/jupyter_client/pull/478). Potential issue still open in use with jupyerlab. ## 5.3.2 - Important files creation now checks umask permissions [#469](https://github.com/jupyter/jupyter_client/pull/469). ## 5.3.1 - Fix bug with control channel socket introduced in 5.3.0 [#456](https://github.com/jupyter/jupyter_client/pull/456). ## 5.3.0 [5.3.0 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.3.0) New Features: - Multiprocessing and Threading support [#437](https://github.com/jupyter/jupyter_client/pull/437) and [#450](https://github.com/jupyter/jupyter_client/pull/450) - Setup package long_description [#411](https://github.com/jupyter/jupyter_client/pull/411) Changes: - Control channel now in the public API [#447](https://github.com/jupyter/jupyter_client/pull/447) - Closing Jupyter Client is now faster [#420](https://github.com/jupyter/jupyter_client/pull/420) - Pip support improvements [#421](https://github.com/jupyter/jupyter_client/pull/421) Breaking changes: - Dropped support for Python 3.3 and 3.4 (upstream packages dropped support already) ## 5.2.4 [5.2.4 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.2.4) - Prevent creating new console windows on Windows [#346](https://github.com/jupyter/jupyter_client/pull/346) - Fix interrupts on Python 3.7 on Windows [#408](https://github.com/jupyter/jupyter_client/pull/408) ## 5.2.3 [5.2.3 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.2.3) - Fix hang on close in `.ThreadedKernelClient` (used in QtConsole) when using tornado with asyncio (default behavior of tornado 5, see [#352](https://github.com/jupyter/jupyter_client/pull/352)). - Fix errors when using deprecated `.KernelManager.kernel_cmd` ([#343](https://github.com/jupyter/jupyter_client/pull/343), [#344](https://github.com/jupyter/jupyter_client/pull/344)). ## 5.2.2 [5.2.2 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.2.2) - Fix `.KernelSpecManager.get_all_specs` method in subclasses that only override `.KernelSpecManager.find_kernel_specs` and `.KernelSpecManager.get_kernel_spec`. See [#338](https://github.com/jupyter/jupyter_client/issues/338) and [#339](https://github.com/jupyter/jupyter_client/pull/339). - Eliminate occasional error messages during process exit [#336](https://github.com/jupyter/jupyter_client/pull/336). - Improve error message when attempting to bind on invalid address [#330](https://github.com/jupyter/jupyter_client/pull/330). - Add missing direct dependency on tornado [#323](https://github.com/jupyter/jupyter_client/pull/323). ## 5.2.1 [5.2.1 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.2.1) - Add parenthesis to conditional pytest requirement to work around a bug in the `wheel` package, that generate a `.whl` which otherwise always depends on `pytest` see [#324](https://github.com/jupyter/jupyter_client/issues/324)and [#325](https://github.com/jupyter/jupyter_client/pull/325). ## 5.2 [5.2 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.2) - Define Jupyter protocol version 5.3: - Kernels can now opt to be interrupted by a message sent on the control channel instead of a system signal. See `kernelspecs` and `msging_interrupt` [#294](https://github.com/jupyter/jupyter_client/pull/294). - New `jupyter kernel` command to launch an installed kernel by name [#240](https://github.com/jupyter/jupyter_client/pull/240). - Kernelspecs where the command starts with e.g. `python3` or `python3.6`---matching the version `jupyter_client` is running on---are now launched with the same Python executable as the launching process [#306](https://github.com/jupyter/jupyter_client/pull/306). This extends the special handling of `python` added in 5.0. - Command line arguments specified by a kernelspec can now include `{resource_dir}`, which will be substituted with the kernelspec resource directory path when the kernel is launched [#289](https://github.com/jupyter/jupyter_client/pull/289). - Kernelspecs now have an optional `metadata` field to hold arbitrary metadata about kernels---see `kernelspecs` [#274](https://github.com/jupyter/jupyter_client/pull/274). - Make the `KernelRestarter` class used by a `KernelManager` configurable [#290](https://github.com/jupyter/jupyter_client/pull/290). - When killing a kernel on Unix, kill its process group [#314](https://github.com/jupyter/jupyter_client/pull/314). - If a kernel dies soon after starting, reassign random ports before restarting it, in case one of the previously chosen ports has been bound by another process [#279](https://github.com/jupyter/jupyter_client/pull/279). - Avoid unnecessary filesystem operations when finding a kernelspec with `.KernelSpecManager.get_kernel_spec` [#311](https://github.com/jupyter/jupyter_client/pull/311). - `.KernelSpecManager.get_all_specs` will no longer raise an exception on encountering an invalid `kernel.json` file. It will raise a warning and continue [#310](https://github.com/jupyter/jupyter_client/pull/310). - Check for non-contiguous buffers before trying to send them through ZMQ [#258](https://github.com/jupyter/jupyter_client/pull/258). - Compatibility with upcoming Tornado version 5.0 [#304](https://github.com/jupyter/jupyter_client/pull/304). - Simplify setup code by always using setuptools [#284](https://github.com/jupyter/jupyter_client/pull/284). - Soften warnings when setting the sticky bit on runtime files fails [#286](https://github.com/jupyter/jupyter_client/pull/286). - Various corrections and improvements to documentation. ## 5.1 [5.1 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.1) - Define Jupyter protocol version 5.2, resolving ambiguity of `cursor_pos` field in the presence of unicode surrogate pairs. ::: {.seealso} `cursor_pos_unicode_note` ::: - Add `Session.clone` for making a copy of a Session object without sharing the digest history. Reusing a single Session object to connect multiple sockets to the same IOPub peer can cause digest collisions. - Avoid global references preventing garbage collection of background threads. ## 5.0 ### 5.0.1 [5.0.1 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.0.1) - Update internal protocol version number to 5.1, which should have been done in 5.0.0. ### 5.0.0 [5.0.0 on GitHub](https://github.com/jupyter/jupyter_client/milestones/5.0) New features: - Implement Jupyter protocol version 5.1. - Introduce `jupyter run` command for running scripts with a kernel, for instance: ``` jupyter run --kernel python3 myscript.py ``` - New method `.BlockingKernelClient.execute_interactive` for running code and capturing or redisplaying its output. - New `KernelManager.shutdown_wait_time` configurable for adjusting the time for a kernel manager to wait after politely requesting shutdown before it resorts to forceful termination. Fixes: - Set sticky bit on connection-file directory to avoid getting cleaned up. - `jupyter_client.launcher.launch_kernel` passes through additional options to the underlying Popen, matching `KernelManager.start_kernel`. - Check types of `buffers` argument in `.Session.send`, so that TypeErrors are raised immediately, rather than in the eventloop. Changes: - In kernelspecs, if the executable is the string `python` (as opposed to an absolute path), `sys.executable` will be used rather than resolving `python` on PATH. This should enable Python-based kernels to install kernelspecs as part of wheels. - kernelspec names are now validated. They should only include ascii letters and numbers, plus period, hyphen, and underscore. Backward-incompatible changes: - :py`.datetime` objects returned in parsed messages are now always timezone-aware. Timestamps in messages without timezone info are interpreted as the local timezone, as this was the behavior in earlier versions. ## 4.4 ### 4.4.0 [4.4 on GitHub](https://github.com/jupyter/jupyter_client/milestones/4.4) - Add `.KernelClient.load_connection_info` on KernelClient, etc. for loading connection info directly from a dict, not just from files. - Include parent headers when adapting messages from older protocol implementations (treats parent headers the same as headers). - Compatibility fixes in tests for recent changes in ipykernel. ## 4.3 ### 4.3.0 [4.3 on GitHub](https://github.com/jupyter/jupyter_client/milestones/4.3) - Adds `--sys-prefix` argument to `jupyter kernelspec install`, for better symmetry with `jupyter nbextension install`, etc. ## 4.2 ### 4.2.2 [4.2.2 on GitHub](https://github.com/jupyter/jupyter_client/milestones/4.2.2) - Another fix for the `start_new_kernel` issue in 4.2.1 affecting slow-starting kernels. ### 4.2.1 [4.2.1 on GitHub](https://github.com/jupyter/jupyter_client/milestones/4.2.1) - Fix regression in 4.2 causing `start_new_kernel` to fail while waiting for kernels to become available. ### 4.2.0 [4.2.0 on GitHub](https://github.com/jupyter/jupyter_client/milestones/4.2) - added `jupyter kernelspec remove` for removing kernelspecs - allow specifying the environment for kernel processes via the `env` argument - added `name` field to connection files identifying the kernelspec name, so that consumers of connection files (alternate frontends) can identify the kernelspec in use - added `KernelSpecManager.get_all_specs` for getting all kernelspecs more efficiently - various improvements to error messages and documentation ## 4.1 ### 4.1.0 [4.1.0 on GitHub](https://github.com/jupyter/jupyter_client/milestones/4.1) Highlights: - Setuptools fixes for `jupyter kernelspec` - `jupyter kernelspec list` includes paths - add `KernelManager.blocking_client` - provisional implementation of `comm_info` requests from upcoming 5.1 release of the protocol ## 4.0 The first release of Jupyter Client as its own package. jupyter_client-8.6.2/CONTRIBUTING.md000066400000000000000000000003731462351563100170310ustar00rootroot00000000000000# Contributing We follow the [IPython Contributing Guide](https://github.com/ipython/ipython/blob/master/CONTRIBUTING.md). See the [README](https://github.com/jupyter/jupyter_client/blob/master/README.md) on how to set up a development environment. jupyter_client-8.6.2/LICENSE000066400000000000000000000030641462351563100156050ustar00rootroot00000000000000BSD 3-Clause License - Copyright (c) 2001-2015, IPython Development Team - Copyright (c) 2015-, Jupyter Development Team All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. jupyter_client-8.6.2/README.md000066400000000000000000000106401462351563100160550ustar00rootroot00000000000000# Jupyter Client [![Build Status](https://github.com/jupyter/jupyter_client/workflows/CI/badge.svg)](https://github.com/jupyter/jupyter_client/actions) [![Documentation Status](https://readthedocs.org/projects/jupyter-client/badge/?version=latest)](http://jupyter-client.readthedocs.io/en/latest/?badge=latest) `jupyter_client` contains the reference implementation of the [Jupyter protocol]. It also provides client and kernel management APIs for working with kernels. It also provides the `jupyter kernelspec` entrypoint for installing kernelspecs for use with Jupyter frontends. ## Development Setup The [Jupyter Contributor Guides](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html) provide extensive information on contributing code or documentation to Jupyter projects. The limited instructions below for setting up a development environment are for your convenience. ## Coding You'll need Python and `pip` on the search path. Clone the Jupyter Client git repository to your computer, for example in `/my/project/jupyter_client` ```bash cd /my/projects/ git clone git@github.com:jupyter/jupyter_client.git ``` Now create an [editable install](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs) and download the dependencies of code and test suite by executing: ```bash cd /my/projects/jupyter_client/ pip install -e ".[test]" pytest ``` The last command runs the test suite to verify the setup. During development, you can pass filenames to `pytest`, and it will execute only those tests. ## Documentation The documentation of Jupyter Client is generated from the files in `docs/` using Sphinx. Instructions for setting up Sphinx with a selection of optional modules are in the [Documentation Guide](https://jupyter.readthedocs.io/en/latest/contributing/docs-contributions/index.html). You'll also need the `make` command. For a minimal Sphinx installation to process the Jupyter Client docs, execute: ```bash pip install ".[doc]" ``` The following commands build the documentation in HTML format and check for broken links: ```bash cd /my/projects/jupyter_client/docs/ make html linkcheck ``` Point your browser to the following URL to access the generated documentation: _file:///my/projects/jupyter_client/docs/\_build/html/index.html_ ## Contributing `jupyter-client` has adopted automatic code formatting so you shouldn't need to worry too much about your code style. As long as your code is valid, the pre-commit hook should take care of how it should look. You can invoke the pre-commit hook by hand at any time with: ```bash pre-commit run ``` which should run any autoformatting on your code and tell you about any errors it couldn't fix automatically. You may also install [black integration](https://black.readthedocs.io/en/stable/integrations/editors.html) into your text editor to format code automatically. If you have already committed files before setting up the pre-commit hook with `pre-commit install`, you can fix everything up using `pre-commit run --all-files`. You need to make the fixing commit yourself after that. Some of the hooks only run on CI by default, but you can invoke them by running with the `--hook-stage manual` argument. ## About the Jupyter Development Team The Jupyter Development Team is the set of all contributors to the Jupyter project. This includes all of the Jupyter subprojects. The core team that coordinates development on GitHub can be found here: https://github.com/jupyter/. ## Our Copyright Policy Jupyter uses a shared copyright model. Each contributor maintains copyright over their contributions to Jupyter. But, it is important to note that these contributions are typically only changes to the repositories. Thus, the Jupyter source code, in its entirety is not the copyright of any single person or institution. Instead, it is the collective copyright of the entire Jupyter Development Team. If individual contributors want to maintain a record of what changes/contributions they have specific copyright on, they should indicate their copyright in the commit message of the change, when they commit the change to one of the Jupyter repositories. With this in mind, the following banner should be used in any source code file to indicate the copyright and license terms: ``` # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. ``` [jupyter protocol]: https://jupyter-client.readthedocs.io/en/latest/messaging.html jupyter_client-8.6.2/RELEASING.md000066400000000000000000000015421462351563100164320ustar00rootroot00000000000000# Releasing ## Using `jupyter_releaser` The recommended way to make a release is to use [`jupyter_releaser`](https://jupyter-releaser.readthedocs.io/en/latest/get_started/making_release_from_repo.html). ## Manual Release ### Prerequisites - First check that the CHANGELOG.md is up to date for the next release version - Install packaging requirements: `pip install pipx` ### Bump version - `export version=` - `pipx run hatch version ${version}` - `git tag -a ${version} -m {version}` ### Push to PyPI ```bash rm -rf dist/* rm -rf build/* pipx run build . pipx run twine check dist/* pipx run twine upload dist/* ``` ### Dev version - Bump the patch version and add the 'dev' tag back to the end of the version tuple using `pipx run hatch version ` ### Push to GitHub ```bash git push upstream && git push upstream --tags ``` jupyter_client-8.6.2/docs/000077500000000000000000000000001462351563100155255ustar00rootroot00000000000000jupyter_client-8.6.2/docs/Makefile000066400000000000000000000164211462351563100171710ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/jupyter_client.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/jupyter_client.qhc" applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/jupyter_client" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/jupyter_client" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." jupyter_client-8.6.2/docs/api/000077500000000000000000000000001462351563100162765ustar00rootroot00000000000000jupyter_client-8.6.2/docs/api/jupyter_client.asynchronous.rst000066400000000000000000000005201462351563100246170ustar00rootroot00000000000000jupyter\_client.asynchronous package ==================================== Submodules ---------- .. automodule:: jupyter_client.asynchronous.client :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: jupyter_client.asynchronous :members: :undoc-members: :show-inheritance: jupyter_client-8.6.2/docs/api/jupyter_client.blocking.rst000066400000000000000000000005001462351563100236520ustar00rootroot00000000000000jupyter\_client.blocking package ================================ Submodules ---------- .. automodule:: jupyter_client.blocking.client :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: jupyter_client.blocking :members: :undoc-members: :show-inheritance: jupyter_client-8.6.2/docs/api/jupyter_client.ioloop.rst000066400000000000000000000006411462351563100233710ustar00rootroot00000000000000jupyter\_client.ioloop package ============================== Submodules ---------- .. automodule:: jupyter_client.ioloop.manager :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.ioloop.restarter :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: jupyter_client.ioloop :members: :undoc-members: :show-inheritance: jupyter_client-8.6.2/docs/api/jupyter_client.provisioning.rst000066400000000000000000000010741462351563100246170ustar00rootroot00000000000000jupyter\_client.provisioning package ==================================== Submodules ---------- .. automodule:: jupyter_client.provisioning.factory :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.provisioning.local_provisioner :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.provisioning.provisioner_base :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: jupyter_client.provisioning :members: :undoc-members: :show-inheritance: jupyter_client-8.6.2/docs/api/jupyter_client.rst000066400000000000000000000047321462351563100220760ustar00rootroot00000000000000jupyter\_client package ======================= Subpackages ----------- .. toctree:: :maxdepth: 4 jupyter_client.asynchronous jupyter_client.blocking jupyter_client.ioloop jupyter_client.provisioning jupyter_client.ssh Submodules ---------- .. automodule:: jupyter_client.adapter :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.channels :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.channelsabc :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.client :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.clientabc :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.connect :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.consoleapp :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.jsonutil :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.kernelapp :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.kernelspec :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.kernelspecapp :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.launcher :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.localinterfaces :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.manager :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.managerabc :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.multikernelmanager :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.restarter :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.runapp :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.session :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.threaded :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.utils :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.win_interrupt :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: jupyter_client :members: :undoc-members: :show-inheritance: jupyter_client-8.6.2/docs/api/jupyter_client.ssh.rst000066400000000000000000000006171462351563100226700ustar00rootroot00000000000000jupyter\_client.ssh package =========================== Submodules ---------- .. automodule:: jupyter_client.ssh.forward :members: :undoc-members: :show-inheritance: .. automodule:: jupyter_client.ssh.tunnel :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: jupyter_client.ssh :members: :undoc-members: :show-inheritance: jupyter_client-8.6.2/docs/api/modules.rst000066400000000000000000000001171462351563100204770ustar00rootroot00000000000000jupyter_client ============== .. toctree:: :maxdepth: 4 jupyter_client jupyter_client-8.6.2/docs/conf.py000066400000000000000000000254011462351563100170260ustar00rootroot00000000000000#!/usr/bin/env python3 # # jupyter_client documentation build configuration file, created by # sphinx-quickstart on Tue May 26 15:41:51 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import logging as pylogging import os import os.path as osp import shutil from sphinx.util import logging # type:ignore[import-not-found] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "myst_parser", "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.napoleon", "sphinxcontrib_github_alt", "sphinx_autodoc_typehints", ] # Workaround for https://github.com/agronholm/sphinx-autodoc-typehints/issues/123 class FilterForIssue123(pylogging.Filter): def filter(self, record: pylogging.LogRecord) -> bool: return not record.getMessage().startswith("Cannot handle as a local function") logging.getLogger("sphinx_autodoc_typehints").logger.addFilter(FilterForIssue123()) # End of a workaround try: import enchant # type:ignore[import-not-found] # noqa extensions += ["sphinxcontrib.spelling"] except ImportError: pass myst_enable_extensions = ["html_image"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = "jupyter_client" copyright = "2015, Jupyter Development Team" author = "Jupyter Development Team" github_project_url = "https://github.com/jupyter/jupyter_client" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # version_ns: dict = {} here = os.path.dirname(__file__) version_py = os.path.join(here, os.pardir, "jupyter_client", "_version.py") with open(version_py) as f: exec(compile(f.read(), version_py, "exec"), version_ns) # noqa # The short X.Y version. version = "%i.%i" % version_ns["version_info"][:2] # The full version, including alpha/beta/rc tags. release = version_ns["__version__"] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {"navigation_with_keys": False} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "jupyter_clientdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements: dict = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', # Latex figure (float) alignment # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "jupyter_client.tex", "jupyter\\_client Documentation", "Jupyter Development Team", "manual", ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "jupyter_client", "jupyter_client Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "jupyter_client", "jupyter_client Documentation", author, "jupyter_client", "One line description of project.", "Miscellaneous", ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {"ipython": ("https://ipython.readthedocs.io/en/stable/", None)} def setup(app: object) -> None: HERE = osp.abspath(osp.dirname(__file__)) dest = osp.join(HERE, "changelog.md") shutil.copy(osp.join(HERE, "..", "CHANGELOG.md"), dest) jupyter_client-8.6.2/docs/figs/000077500000000000000000000000001462351563100164555ustar00rootroot00000000000000jupyter_client-8.6.2/docs/figs/frontend-kernel.png000066400000000000000000004537511462351563100222770ustar00rootroot00000000000000PNG  IHDRsBIT|d pHYs B(xtEXtSoftwarewww.inkscape.org< IDATxw`U-ҀFDAZyDPA tT H$${uvhy̞=Ν{=\^GBBBBa"fշ[ QhPHDBrT%$$$$$$$$$$$$*!!!!!!!!!!!!ѠU JHHHHHHHHHHHH4($GUBBBBBBBBBBBBA!9 QhP([ pOO"@&oU$$18WTcHQ]\vРVEBjj*flQQ}"!!!!qB%$$$ˋV=zԷ8ڶO}!!!!!qU E>w"Hi%gU%]ŏEEVFLLOGշ0?_ZoZ<~ݺ՗7>|y uFVn2Ujku~Fqr(ulh8$$NڦRYL@PSu:tMu;9Mzt\D&$꟱Q={kq=M[ ǏLz[oW֭%'UCYFPȭ8Vf_=!ȈRט3{61:%GK p΢E<4t(sgY|OQ5j-]`U\/*ʍrm/r5wqߠ k I5'ʙ3t?O=sX6kF~̮ tRlkIIdԤٵB}/kp,-q#G̚MXr%/f) 9l[p!?nΈ>}e߾},;?Q,\}˩*+ pqqa'K݆ikr$$'nu^çqc" {^HK#'#ñaz= Ç#W((8^hJLĻQ#z=?TE- /kMoFW m Xs]FL<;Զm&xr/PQZ$z&MSf۵[2'5#\uٮd޽ ݛQO> @uU LlrSEemUlٵ{taʌt׏ت=f!6l(-(`ҥ֍&Y2;INnxAAN ^ Hz<9tt78ܼy}y&[qdXvoP*ZIhd2JJEYWOOSrr<)yqn!I=(˫˔JS~]Q*u:m>!!~L&C]Y߲MUQQkSm(}}ѪTd2Teex9P1ވ𾙨(sTk)q1=O?MwFƳV~'^'۷硧:{3璓ٵmI))뮻v ȭ,>|Mz%&2)*GgK/Q(Ҿ=qJEmKxxؔ#!P0z5 U gW(O/6'GϞG1VkS*qvq{$'`>=r%T(NJmiƍL:B3. !뤪ҕ+{. QtL5 &ee!็!oXL:OьXɺe˸͇}bj }Yx1/M 23y'mP(vb97Sڵk㪊 2.]"n\~a!^^f}(Q&LP.!PȾt[NlCqq1A$TSEIӧ9o23 c7ibq]]]ۨM۬CWΞe-2H`Wy?p9+͚-1:X*loknZsСDǛa6VεkIMKC Cz۹oGvT*{ Drrrؿs''O$ {vضA:璒d<.Y}No޽̘7]2|߸[ՙ3ͮ5{K]]͢ q_}=2Q+<';u:-\Hի$ZMk`iEK} &5kfs@`}W7u*Wsrx`-U*ti҄Q+WؖD>} `~CЪukâ%Kh(O4òMw͝KX&MHff&_.[e`@hY]3jP kZdC69S 䁗Z_`rJl9`joq̡}˖<3yM2BR#Yv5 g5p ;ubm.^Yp0۵_ 7_~ɢ~eL }yf Q_Rt&eel޵1#G򫯒tKVxqt` ШٷweQZYlQQUZ-ZjJKJLmwƽݺ &:&]۶QXRjqq1f=<<o(( #̙CNNObq^VKv̚Vef$?mۘGk+X}S^yHZFEqa;ƴٳ-)RȮYp!{Ϟ6mj.gtj*oMf-Ɛڨ$/^yjn;r6l RGro$Uor쇬ٶi ~=u놓;֬G^z:\njȪUYbbXq#'ys\CNVz#ΜoϞOI>mE5 eU[5?CvkwB||xu \==`PZ`ފ#77OYt4/^^l/4i6t,2Ǟ{QQVClΑ;9z! yw*rD:uӇ}./.fSO1[7Fbe΢!g ?|&ˊKYp]fט7+-;g1'RRU\ w @CdtNޱop0yuvG(P*mzTӦоwoN9áG"Z-j#z)SuMC|`NcL&cW_9GF6@d J%:vd}qM+BۄxyyQU]mR*42RrT%Dv$W_~IŋZFae y ѣZ8>ѽ{6aٵpM_͡dۋӵWI3+Յc򊋙駄n^cʄ l_Er‚^k;K/C/zًZi޻a{;]{2'q2 t6lTWf:2/+ 9}Ǟ|+-ٸZ,~`Or2%(.뉷2zuT\>}'Ǎ#m["r+9E=xP틇ﴕ3#;zɱ(.bVѭZs۰\r2֮W^aV,WҲICBؿe "@cLjl҄}w*99 7!F #{9GΟOai)3̱EΈx{yq#?bu**kؖKm kߞCIIh,vv \̤Z='˜E8jPu> SOxU Vߝgo6͂`'FL;Q_jIٳߐ U~$uUyً]P֡Cȸqoz@<4lo}rssNDSԴ)5<sMϏVwGTTgϟ4qCxz fs[f羥;#'N4 fF<_XAzz:3O٫sg8('^}ծq{kU*qvu5pwfAfarܴXNVFssu8r#O˶ur/\@֭iӫPӭ[7n؀N1V4߷? y]m*LSw=fvWmv ʘ8jj57JJӱ#o͘AhN␔bBWu~JU*ڷhoa1b6H~?lΔ'yVͶ"gĸq\t'Nu˖OMe: q,LJ+W"S!K/W^NvnB9EEШx`h(~f$$"j-T2={xFg{ur9AA|㏦gs4 %ͷB+LRsi'8kܹ{觟Ν$_QUGRI||K!$?NdX.A>LJ^{{GfajԊș3{Vh /^$F h:>Wٷcaa4*zeuuUϧ=w[MY:h| K c£{39b',U|zzy1H=yWWWw|f!YIq ~?~%|bQK@FF# a/=g+gV0Y@PJ Zm:j))-EڎJ7BB++-Bz5l 53=p9yu yN觑|rsq.-Νͮ-4n駟VyLnVL ^|McZ,Z7}{b(dH:uĎd, Xf f~0L^;wq^S[Xa^%hZ:J*JK'P]=z޼y̟:>p:-$tla3@Feec/JO77,ruaLBVXcfٵ3Ш*/gGν}rp.9Ozvc >_'&.#z IDAT7%i/8GضޢϪ**m{ N}A}#Gy*^MeU:o]{I>Ï>c| xƌZ9,.eg(S{رng`Bf&j~ٺ 3涝pJtauɓ- ݙ`[c|v-?$ѣq,Yb7XB^D,aa7L#:,R)+,dɃyVlЯvZ.0a`ES5f^j|*x9`HB9~|j* &vl \Xŋ&4mޜGKtrY[m'ݻwӳ];6R[N={Ǿ}lܶ \+*X'o͢в%stO_}E Yl.z=n*Xdj(Q(v.o駄X!plͪ2{rt:&s.W~4cP[ٸm3~mUeee5'4rs$ۛ1pr|;Ѥ$z1 ^sBT6TVUܹ]MLb~O{/Vm3bͶ|;KwÃ/رXw5@nOrMVo_~!><}rq*"cbl;"ۼ)DV4ɨ)yDAfgeV^o}eegfN9S}{\E!{/'x'~~۳|:}:xiXoooz%$inOx/8O/fɧfz  ( k/Y7K2B"" } }M#V\xwT_q{/OXbjΘAx֖DY*++oWTеkW4bTX}/?{6We˖z}zYٻ[."X$Fʉ8w ob+wޱxOϲ͜IL_qJP֡_e̛֭;;vd 6mjJ׋oߞ~;vоgOӵƐbD._4)P8z4۶mc]1lܷwsifUK;crJKyQAN+WݬOqHJd5!, yMIAV L&"oߟMJ`ުuk@Zm%ڸo6mFa!CF,v)ϟ翯!-4*{IKO7L,  g)Sy2m۲i $*j5)$?CG~n.Z`KH-3yET݈x"N(g3oK zO:O:>y'6C1cfENtv|ZK\uO5xp;շWX˟>p E۶YC<("!yO{:ٻL1ɵ10ku͛k# XDf `+o[8d횜Wyɼvf}NBt:Zڔ{H<ȞѪT8Cm4foȿy^իiwYUN 9sgDFothPfйsg(ڶm,/zaڹV 7}7PCӖ-xWen>L'C6V5۬}U+'Jνz1,󣼢xYY||mѢA?GvWTkAٳL_u޼M6nziE3B?^ʺP[۳'`;Jd30j49r({?oDϢ+ܽ;Ǐxj*V"!oqqߟO.%7'NǪ`g%$"2 ,SBfzz4&% ugZib C&Ѥ}{~˲.TUTboˡ~"?ZM4*J/77ݕ\kzJJ6 d2bbXVC_k] m׎|Z-$L;l3T`vtTTVk hт53gbp\BYq1z!ڕ9䨪lڴ $$6vGڞf̠3g,"[jSǰ QSa%UBkot>vlȮyhPV>^-}海"V\8c^}W?0Fêtu~Q?jMFsiyNQ7kcHٓ5u V=e[~X|9gޣ_}EVk ؞HD'7ٲ%7g#ňύ?o׮ek1c~ݻ7'Ol2?GN >'۶:aQ(ͥK\/_6N2'a(٪c,3 -Y7# ?(@Zj Ϛmʿ*[̙x=lߵH,X@-,K\ӓ%~˧|qwwgL ڲhYKTmyNϜKn^͛6em( u2-۴;?>mڶYh#Hʷf[g{ݙ<#(eP :gU$$b f:9>Mc'嶑Ɉ9BxwDN@d$tv&ATQafh &O;v$$[9x|-lؽLĄVo` ima33p9d2ӧUG ̜61zx2aa,^Զ+ƨI+dRydzM1[Q=Leu5(,];f͘'ѯ_?h̬ޣ!~3ϐc=^uVVm"'2.o?fΝ2z =cj@}Lk&1 aR A6kA9hZfBXe;wZ=W[!O^C^)/lɄFF2wٲsn#w?ת6ByNfVl$4NyNO@Y- =j͕8V:s EEdl21b'}KOs%#VYڨIN-uLLdYn]F˽Æq1\\]x[msϣ/cLjfڄIJwI;J 3>Æq>%JElNfjtŶhڸ/S^ZJTBX83Č~IͩcH7//bK9JobSV=^o׉1͍"?tiQj8~ĪJNvj4f9\%5jNU*J'#m++,4piZTWT 3|Օ\MgOsru";aJqCwj+d>^_ a:axWrQ4*Uu $İ:~:q!sgݼKy\(J;wP,Ǐr;BHVJt r< ɴpyr/^3]jp%),0W7olb1JJꬓGPa:&?O +r..DvfzHH72={UhLυ81];۵==QOT6la ڳ-2.ȸ8: ǰ6*+ܧ]}d(n#!>AA5h]YB:e68!z[_E;A_[r-ۚ ڒclmR{ѣ6X២.\3sr"QY^N~d4ؑf9 Q6d2["eV9Rg9:'9s[A5W: T]YI3CSvs~rt:n5+..rmV0hΚU\0Okޣ2]esjx7oL&LJ녅lms۲rm_@f$7*)ܞ=Z/ZMʆ e"&yzb }^?0N+͛.ʉrs9mm47̣G߿9դlD iz56z=g W//I۲XlV7np߈0, iْkP|i"vQWUs*–j?WQZ9s- {~z7mʶŋ lڴnr7q  8:|C\bbT -r`Zµsb rW_.9/oHhܦ ?})]9Es&##[T V}hC(} <MpY{QBg& kzsTNvߒL=L&#Fwߡ GW#ȕJb 5.gw;f[;%!(&K>uuŷiSTxx *1'NFuy9pܽ @ѵkw f}} rHNd]Wp0:Zfm-Cu+W1ۼMAZWH^ԼcG ^%NGPpoӆ75Nk]w\85m$J4dɉSW( F0b*KܨQU&mڠr('> CA1c(ϯmJ//bC&?T1jE&vӄ]x**Lu!Kdg@#FP^PPg9NM-k3z4T1jE&MTQUQQg}7 _fx7jDEaaED/7GUBBBBTTJvի=ʅ S'=CZИ,FEY p'NPOtf\s~Za\bӎX7 tkuo` ##͜RqoS]^Çm8"F 7a8m/relI!|VH\9{-68g_Liɾp˗i۫xl[dRS()!FXR@mF\.7zd,>LfV%ed]F(#HHSd2<Cc,qru5m-ww̶@eLXɕJFRd..8)(Ftl/Qթ{!AuZ-SWg9>>Daf!n8åP*OCDEAFQH˖42 j5rh6ϏMa^7ϜVFFgÜ wWǥÇrroLFX PLW` ݺf '|;8գuqv6YǏrn\NhBuABB\c_|Z}} k֌x[}6xWz/\HRFSLѣMe:'_|NB};VV܌^3ˬq-pwgP><+f9; }]ӧ6^Azn6rU CP0wB"7EBou gZn.>2G߽Wfu+~u:ePf빜+bji V]KqE~d7<8b >/?dҥo|Sxxpon<;f!ْ*)s`EUP!v(,t(W߬SWVR%]=s6~5^x_́'[7kfaWl7[ۭF/7p:oV]$gŧ?3n`Epؿ[2Q qB@)11(+Dr26o&8$_zjIH'2 Fß_~I6m,B Ue%W0n>>uu:ne8|}DŋmVW` -QUTp`2B۶mTiX~=AAuid:E pvw9[MPD;v?@:*+C~OӦuοiܸ&MBĹ wR5^Y۶4M~feUI : ` zsTBB<< JXN\KI!$:29P/4ˆ+W'xfU||l{)¯Y3j5}$YYDvP"7^bZwp0n#DtB+xtv11:sk׈n5:[(L&Mpq0$Mo/(!q'46mDfͫռٶ11^bu7b>3Hi)?KsY +*Q_ddвW/:[ BAZ*"eKݻ7YY&'R6l y M+D{CrBCq;SMt4ؑ'Of09hVۛhQ)ONFcc)ˣ[76oQL 7/nwG#G8q822((( 0(=z{Ha@cܬ-=KL*q3gnޜPCxTzL߿Ff&ဠsǎ'0ȶmM _Adg}͍-Zdbmt!np^〠SC++:Խ{!l#jNE99ܰN*淟~Mǎ63Ჳ1vAv6YOӲC))4}ǡe@~v6SMɓ&;6m5kzݼ1kx}L6Yop#,ZKӦ!%G{/f}6n[,8ܝhKomٲB>o9h&gJ_x>.66P+ٹs'GdZoof׆eXr%_}XHl[i~>_#z{ b8'E[ Wn 䌤={hLۘ2,.6?_/YBZf&A䗗Ā^3)s3ʩ,(?B5}p/`JH[o޼ … YZQLgM-lyiH&MH=qiAN(+(Q4"˩j;mEO7>ԦmUL3s |<<;t oYpZA<5a '?5gk'ttYA\04U7e[hOhVccQdɱ}ص3gLL 4< $$?U8sTTT(߽;LfLƪyHMK><3/ӥy/bg§xPOdQ'rޞЧo_ڪu$Sz {OFO=eA{hBڵióӦYX2}:iiL~ ZhD}/dǶm|E6.]o;vɲe4fc;}Su:,_n\.7ìY/s:m1cIm޷WufH^0n WG#;t%V1$1},{GtF} cUɈڕVsQ08}4]H;u Jez3Ο'ÃNwͩ;L+?VP//m20??>+\Ғz_/l 2' +|lo= AyzZ4/RHC0Q&JC#i ZЄ_9U*ÂO=Çev&A[]_i)aFU˽\j]%W8.qeee8;:boRUUe3T*jeFqv1~r(RVXwr^Cb#"9}:[&+#[ͯ{_|a()5EDG;vX]5+Ws NN:}v֌;%s^Rq>RfΆ7o. `斕J vDGEв9 Æ1IٻVso __B }L$1I󥚵j˗s CmQ.䥏l-5?y2Μ7yZv.+jbΜ|C$dj!!Tidefs ptqUvfnr UZMhnqrvnߎaQ.zn K-ЧI}ծ޵>+3ϜaWJ 聇8&܆K/=P768`j5N">:D?]uq:TFdԶ{r;;de7rTjc_xx0v8ӗ_VYWܾ͸_$orN-# CZZ2V-^Do[FmCXf<#MOЖ-F f}{puqH}۸9eeˣE|ˣGhXyee9B޽e?-Zt\N:DPNͦ}\ 7YaŽ{&LqxWpf~>O=8Zm0꥗ TxVW0 cbo҄=e~DR%wvyr^? ]+13k.E5j9T 1rD|cJ:ohpKO]eeܘ}oxط/ -"$"p.䧕+D ZI[oiH/RVUE@`nR … :yî;Q*h9B>}hٹ3>gs!"H;{FHNT3UOGBAEu5k,A))+'] ikZ4 y7o{nn޺E4O.ZZuΨ}XjA,ߴWǎ(}MM1;!L#oB}:4ML=7RW˵z.\`ڵJMeF˽RP::BKCv%$ƹ4TWs,%ReźuܸtO8 ": cc(}Ut+*8|0Z<.߼ɀmIz!#RL?=}s 3.iS{qޘ9vQQ<ꫲD 1ؘ,¡*=MX_85JKpqq1y3W,tcfj^ SrV鐘ȸID~Hv{< RfϙCiD|6XC#Fy:)3gMRϞy?O?-18*=Q*@$}Mz$x7}.S>6h`8^7)j3JBQ nBoɤSӹ3M##)--ɓ4 eKAD0?f MQa yyK}}:ti*go\(!#~+CodUֵ7QՕ,] H8,XKNq-_F˶mq">:)) SIMK#.&;*KJM/Xrz<7&0*) ^}G.&73;I9zE 3`>LjZӇ={B~[DLD`NDt4?^ͪE8z(%%tLN慗^}^wGțPvCg`ɣzX߆~t3.gee, )cmvŅSǎQpѡDiB$>.?RN2ǍP2 U7[[~]ʢ^Jrh< v-X@HP݇ 1;F(ӧZqClЛp\Bj)+.6_UUUlQ+ 6hJ ڪ*lde_DaEᵤ2VT:wPP䄷73 u{C:w+ӧ =xp!ysVmյf&87'!>2fZP( &1*siiIK#d!BeG.t6~LcVk0|d擫'G6:gҌJ.P)Žn̓C𠠢p,MɜGƍ3?_sr9顇"i0(X=:MG4 St/#,b"G͍G_|>=-vߟN or(S'bCBؽg#ǏGThXw58SC5zyCzz:W^%!!jٶ-cO?q=?6Ne\< n%%(2ͩݹsGMzJ:#}NkdlÆ jAXt|":JR6ŋ9};I2CjD "G"c24kZvB+(!<Ν6g67o/n7 ˋ|pZ6y]/*bӢE Q-8~ٴɰ_2acY1%"'/<+ _~99|Ǣ*mu4\q8Ǫ-[X1o%nAu5GmcŪU$XP`Ed1zu[[[0;vEIiG'`Q .kۤ0v7PVX۬n5A%;=ݐY|u#N؁Wh(*;;nddԋNaN]yWMUTFJΝx*jZCa?KLC+5׀]/:ڙ)mlb:*} ۹SVZ:[` {ԋNei)Ņc@/_흜6hi@!%8ӧh]Z\Ső~5kQ(hڰ!9x(oM 'ժQ(NPD͚1F>?в%3'):F )t)kׯ7 -[&KG0zܺ>0#7oիx1}{q<'3<"yAxxq:}13g{{s`'s:pIy7z$ndgl޾FsǏ|@NQ^Kz=!.cW,OVX!G\#"XGaLё /) BCyaHbfb NN0QQ&Ypqw˥KyaRXڷ7t2A&FxaHjMh1Kܔsګ[6m";;p @=&\ xstqi*im _+U?X>DDczDZllTN'k?A E}CtBjkΎF@`t4ח:1[{{ 4LHaB¿B FXqW!ҳj^5.=ğ6pBÙѷ/^^^,'z~ߴ tNׄ|NNЁid8ޜJ(N:%;~o]D?Dq k#uk=-۶b:2cݝf ]^Q7WW:,GQX'Xz5Z߾ADǜ~,D a#)of[~A~^;vPX\LM3jG$0,L7m7:(ȱ5{a H^^XJã]r1ߏHk ϙ&"s /3?|!oC͌ ]ɘII'%iLŜWfȅJ۾]#sŔr̍K1sr~$W+ f7ooZCțVXaVX9R6=zF!DAؽf >B@Q矲 |I>yR^^3_|nsM¸)SDI}d~<=\c0p^P-om|)ڷ5KGݪjp%qZKј=olҬ)hZmڄ._ٙGgr22h ؟Ԩ@eoScan.ػXDGrhu +NѠY3\9s*]tPYC8D-n^B%% wv#K.+-%'=_An]qaluw ap'TQ^uy\;wbC.^]Q]UEʶmDaqmw .qSDkgYVTĹ;,/V@Ӧ䙦)~b:^^DkgҸq'NXLGmkKd8PRPݻ-k5xgss-GxֆP֙3d=k1{{%%CssgUU MLSͩܮ]55 ,g<=Xaſ 77^Q˖u>.)(WYVTT' yhMu5:oDk4oEUEEyu:vޔ :@Ue%oH͛C ZMyIIy+q]ZKJ =r휜'*d o@Z`|B»qzMO/4p6@pan߸a1{77B[B{d:eGTV5垁B2BeG۸15"JA 79r,֖V 8/- VK@TJue%PVeLJӧɻxb:j{{B۴ 7oyż)X<$!Xs7AZ?B:a}΍a֢E(JXALyX ;,Y^ؚfi[֢>64 \?uCPM_իšee:?#6frjbwϏC+V$ՠ.ȿ|+WӇ\:v̐j *JJ8q#njAղoߐ7VWh5RƁK JL4|,s,*ɨȑlIO'$jUx1>.n>QCUEN~~ N$l,++#_?J%Ź߾dNtJJOJkעV*-vvTUVڠ t!WQ@PK(~={ &-Ayi);V5^WaHؿyFVLMZp j4 {WW+*8nv+n?qb//o_=;E5Ʀ,") G =R(T*Bu 0%ͺwtV{tԶD\WFE+YaŽ,>:%sp=(b윝q=Ѿ=))SQR=->aad?@Nz:mn 'fqmڐw2~~TWVR/:MpCA9m륌98 Ÿ+bb( F.R4%ezk*!{W8{{?x0"_ܢh+H 16m:xc:j;;{tqVXaVT_⪊ RvϏPE{zz:ONxtofprq'(E ai={(/-=ϷaCtQ+RL(~SRTTpy0׿}(JUOك-x@vIy@EM=\IKYFiPRTDHӦBjN؁W` H'+ +7pU.)7+W'<0;(G诣;M =뮦pD54sPپ=n2E99iyjrusIݾz:yy49.\?wb:j[[''Wr~^4]oK`@MXZ֣;t@?732,ccoON iVXq?BX_nkޙ50Cͼ@i`_ENeu5j5++iŋFҰbq_}g_G hu ][fO"t$-{6 nHF f }T石v ps#7GG[dbrB@Ot**1{6wgt[YqBSUŭl0NR(WxcPJ_VXHi=R\\DQue%Qtc=negG,W񳤠".!SUQAQ=j%; ]Cka n<*-!~][:zy kܭah w"i8y3aab-Aqn.GWpYף|uEG-? v@y?iNxpb:NaV'6lŀ?Oի⭲Vqj9f - 3Z-'׮=0'\Ixǎ mDe]KVQRUHToYqa={ˎS8e eZHB:-_KӡW/<9k ~/?.;g r2-[FNn.3:V(tOLd?^ނ ߾6 htՋH]˙O8[L)\=6V 㞀vgRH3EЇ[o?3oݮ_{n}ug]Zq_~I`lŹJYCKľŋ\?jZv>dXMN()ae1oUee݋g~rh y[GGRo(/zE޺5%[5JK)+(͈p! Z-/ϣ!e***,. 5! 4BܵC=@xSRzVn^LPtt*vj?vNN#UBB]@Rxsa\!8>~9;*+qW4xJk=\y\!MZ/cV9jŋ>r{R`ǶmmfcShj0`:YYff{T( ]\䮇3a~;}%xSP1!%S6ofŎ4j#^|pLhT㣢Ⱦq=۶jmݘcO>&Pj=75<~`ONf´iF^uojt\ P U+# M҈IG]qm* Ƹ&UۨWSRh̍tA婵kQ(\IIix3Jq={L},!deVѦ !z;UEj]a͋o҄[߿^tRV&86ݐ\U)}8\%=J8!ACq$P[[ 3ʻґv9r._p0}=++9s'7rrptp Q#53jhNŋt`T2y22KN6(R|m={8}8 `U* yJ59.qy\E!jc(-*" vVX!r mףVH֍Z1Pk(짧 nЀ%ìYtۗH]¨ 5aX[h}{_zx1gO% 'ƆÇҙcjNti7= 0ZZk~` ޝ_bJB#:j]Ū 6vv&@uE dE`!dT*cKiQ>!!@[;;<2NXBQkd`%ÛR>⭤k *1̑OGK]2P`ccS2E_ϛZj54UU5X4hEImccxnrGM޵)%iӧsϜM9e sbNvv䗗c^WZ!K5a!I ##7`4T͛_{Z5if& `Ux1WBiy9JJ x`E Þ[OlQ[ҥ,o;]^R)߈_ܳ'}JF54??' c@At4.ZyG^ӍG+dn,gJ}&8RVa$nh(eO?׺u<#54QSdfd~*-[B2k:~j"ù|RKmЕJx鯼""c/,' ̤Qp0>Uy^RB+]xPp2)]J;Iċ9o- ,Tw\w7Vڻv*жK ЬSdUZqT擙3i}f\=o̲%K8IiE~~mْG_|Q'UZMHQ*trp͍6-[ЈFդrHHqHvM~z1d8Y:r\kגFqe%D4lȰQ̥W|⋨xO3}x\\\8sјtjK[5q"(&y~+6,^̯˗3wT9>{ sgɉ~_]DW3iݶ-#'M2;NU܋uTi4$6m-[񊙛з6i¤ jՊӇz*ޛ9ʖ(2M|}ϛoPZRB͛f~%~AADm+KG:&RZwNl >Dܺb)]4IL]7/P9{6S>T7D4`LN]p/DyHBk>kz ywJOIwET2e4#&~m&Vt/1rHT*AtjՊ[/c+%4#\]yOw/{vѣt)/晡Cy7 rMP˩..Vkh1' UUJQT^;;;~)KWs/_|A֭Q.) <=I ɻ}[xouPlJVwɓ!|)_mD_RQ[ZŠ bΝx1;FS Bsh|!x[7\8{4mLQa!/xͶJ6Y,#-**jf&#.]╏>L9; LodӦc9r~k>':%$0a8|||ȹ~[ڔ)|0u* 9Eɤ?A>jEj3hhdzn };o?owwƿ~Pja:#OZb,HtۻP5qCBt4M7foTn.6R'Ck]VݻɅ9!4MJkl,qjչ3_|.dvRaRkߠP04iJԳO<Ud 7ggB6\\h.RJ R$!yrAR >@U\8~y{[[CHEIƪW04k9L)۶lA2﫯WYP֕+# *j)u JEBr2 :%;#|B-IS#ME@ nڔYBN Ϗ[]GN \Q  F?>poPsߵZ٬0}nNJ5kxWFcTQՖG5GT%ns>=0[m&em"; ws>2B6hUUlޱnz[H*1gD3>g<"8\>&mDKr;Ʒ˗p.L6M9l/=$6ݻ-wN]Vd)M(ǥߔ{-f4ǛQ+^O?qǏײ%/?]5]=̅6Ifmmiӣ*nͯ6d ST$ݸ9d&s8u >l1>#FЮsg S.Ink'S a]܄));u*;c0*JX3˓Vp>mNuJ{AW TÖzTt*է 2WBPХW/j5]d[gOS U+_fyky6m)..fM),ERGh ..\u#[f ӧNqWQy7n1I?@}mrѣ:z*iÖ-[bЦM3>W,]yKߟ &IDC>o]Ns% j}5`bN*NsEtZu`k]/O;ף&Re׮]wNjkDEBAaq1F/-Wk9měDjZW5Pj*.dzbl^R ~L,}&_BUr÷\IʥK*,d.IѠ6Rt X%%#<z">,\+sXY IquT  64 gMP`RٵUB /UZ-SSiFx./ghU*1)dxSƎ)q.7̾cnj) CRIuUaWTjX1mˌ/DPpjH>3f Xt XZa Q(/Faa&&b#1T$v1cTVsƪqq|l~KΦE<fc{uJ}#>t_6@d:%%jGU2֭e6,]ʑ#GqcbxOz=0qcvʎKRнxGjߞgH.]ҥx3᥉l҄qÇ۠v]y78slAy={JFJެ~P[6lCs1o ޶6=z'(Q/( /+}۶|R^2 мuk~-T@6m/K[8J3h ]7sr|uN6mg{'ճ @L |=_|2{U**Yգzݽ_߶u+Q<=f踿mc]pAKs _3ya]] :r .TB1jZ>L@jUg*Ja]=rO?BaC@q~>/\(:f£Z[^5wȑ{{lwyE^NN߲ͣeKu+³&{1CǗ7G +/x<|}yw}-UU*JJXUT1ś4?It?d*MgoCt eHI!,(W//1o2Q %3ÆbVϝktMSt D7o +IٶiFcVYZJvz:v( rr(+,c6^c '+5b:h4ܼt ?poڄb:]fO7oRQ\\/ގoڄGppMȗBAnffxJMEmo_[X'loe.(@P`̍˗,-{Xp+;^]h79c~+ٳ{f  oR=֖Wywf|Eyyu֏N)mjn7KJx𡇌nr>}hMx7Qؘmq'H=s~˺Mcg$$G[HsT/.禴,.f} }X+{ uxrM62-Yªi Mu5id0gO .AhAyK>@:9 WT $OǚE Gգe߆ ӱwu%ZW= ҂V͝k1JEջ7PDF⦫CؼfAo!i ʉMecCN* uʺ j4i (h޳'׬\ң.p@[aax~u6hC!xA~Y5˖񬮦P:6hQ^ϒF+£0e49q#'))JMePN4lҤF7IyCn{-)iXDL mXbeee\r F?q+Z-}a͛3xifOY7u"%qwgc~۸=^=tnjȁd>M-D ~nߞgΐ yI1NP.///i$G7fQQ<1x0NNNjsN|74۶q%3-#ZU۶|#If<#8IӦ< j7}{HnEVӲM4WJ3O LfDyY믤K]E02h; NVXq72_Ėݝ^1ZM1fhq&D7K?EXc:n64ӱŠ :ݸ} cP^KNYX9{,۶o'(0A{o3" :$8rN8BQSIF<|"?9nnߧwm{JQaN۴ ϐ d=JPӦɻv VB7<-[ X(-My ]̅jߴ 0=kT.ȾpGG ^O: n1oeoSRXB֖ccooF KNFPP]]͍qi|\.< P'RV\[ Vmdy C?Uj5ڵ3޺ř۩,/x ~o8IZ:iK[FeUQ "*(ąDE@E\(.p# Ҳ7"P@V-BWr?K敗6sܓOjZG[ 7n̙XHAA4͞ov(|ڌFt@uT酎Sڵٺ5bbH9}/On𶮾x(ǶmCUwΛ`nT %;AJoOcQϞ泊PO^6E &%|}Q"5L>}ZB,f3Ym]+T iկ_8e[Oh^iD|PŒ3 Y\R/\B*חk׊]:ߐE˘ʕ+r(Z|3OXTr*w'oJbb "M{ń8@a5Zv2 qq GEnM¶m\MN.d?4-[ڇr?r#G \9"[ڠ1r~B< ի9++Ѣ9sI חm]|}NJBQeJ'hQ"? EQW4sT\ݦ?]^̙X& }m XL ժ٢{׬O"4]X:Ezj*-СO[Vc0OTJHJOg?i⧆ 7,E4DUUDpS;*j,bX ſ*=m6l`E!E[fEΝ)_{Wr糠, 5jPeKNlNʉEͦ 4λ|G7m*R.;;zݺEY,B֥jt4bcI=sHi3x{Ө_?ND\\ҖEþ}VMczT^^;t_o.a!(B|Ȣ$|˗f>eQL&{oݨi*aNǎ:qӄ(EٳTwoj~L&ժy> ?$HqӡMZRWiʸ)CD&xk((DpD=m//V8G͖-IMLkXXA*qTnذi e+TjJ nМ`w3@ 7U4ϿJ ^V PeK{(_o_( \lUX7^zu{pcEd(C{.yuCqq\cDC=콯øz XZM `l,Y4s󧴒Ϝa֭XTS]!Jmj{:CCݬnmFk4?k11d\fߟ,b6sm8Gw"5)ݻ;w/WT"px6vQa`Xhܱ#޾-yO~T^Dw9;]V*5{TիgFݰl*֨AHs߶-WfcPZ5kph}i;{oZaPsg*Xv59}kz~j^&:wg ;vpj߾|Z,Tm>NO˖~ǎ\Y0__Vtju}@PVѣTN[_xwI uO;plau֠]Ә4=z9WbR>,W~ΜW^u˖4n6N~11b0??m˃O?oeĉb],lW ?w/>< 0o ;ٹgX,|'TSqkX-DMԛժ=LM_$bE^_P^ ̛A7x0ϼ׬s-I~ =LU՝߳ukؑ <>{6Fon{߫0rBxXsx.:DX͚ϥ>MիCjt] _p0aa-f3Ɓs.9Cz Z>wrt\ r߮Z,\8~Hi cT%V-wGpcE/4tx^4͜ڳjMa(Kغb﹇%zB~Y70``40/ UP'm5kT͛:˗t=k/s?[( ̶5aiؿv">mۨٺ5W.^}4[AxJpDE/Dq]YI'5ܿw/CVhV:Zh4)L4%3{B&gc…nݚ;|^a4 Gl9q 3f͢RHS~ݻ~1zy̙i֩sg^\i†ի9Ȕ=r(iтnj %9’"tjV7 2m/i٩UaP'-*}6:9 ۺq#&陙={ݻsː!8*WZ=˧o͔WO6Z24 T¢7ްUXy3/ΞmêV70:ۻ4j= r~ɚ5kxw۠۴^,Q̙Y#9|mV;WCgx_vZE;̙Bs'v$H܉/ǒ͚ ҤI8dǏ?g$|??Jիi5x0UΝݱHs/]7 ͯѬYҶ=q"o™G Vq4ڰΚWmp? "IzӦ%%YyNBӥg1 s(C܄S9iKN&S"`( TjذHOKOMſBL>>EN[dVnڴ!yBk*fU6$)BxU硱'իY{~F IDAT!pixl(&?Ͼ*}=W_ۛOwWKⱩSiض-*Wfl$8IKJ"bEg~7uܼe mקu^+yl ( ^>>Dk)9p#G8Wo(4Zkh"fF_d?]mm}[v9 k'0!Ml vW֭ʕu}P!M6 4ɡ:5>TִGԭK=qRICQ:DN@zJ Y׮Gvf&v-R8Ν4׏{l"PsGPM"8o6e2^?YYDq듰};իsj^4ԾDQ.8A-]ZJ ~!!;raʕz>8Ǫٗk/}Ivvvΰ.7c9z͛TRq]ʺv}$;GwkȲyNGwŋ4CO˳z2._b?( Ҹ];[F,\Y*ʛs搞+=GӶQ*<6}wɷߒt q0綾/d;lNvh8Wu*+JΝٲIYYלKH`Ӷm7rJN'Oϙ3tj<*@6ϧͲez.6[%At4/'nZZñӠAo}>Xh9.m{L.]F}l=&7R{umdϝ< Jzt&; &//%;5y&ooSR-,(FT9ٌj6*W~3gb!8"B|jNU5gf=N Fk>k4C;4eeee?9/qf^`Mz=/SUnzkhk2's^G1!D rb0`pJccJ*%ۺa/Y!~.% G?&jaWi۳u+Ͼ۷y;};S_~_|={EAnJH`-0gs|S8eF;wڵv^VSͱb.x.\{8~́sN9kέK޽TYӨrTߧN)'^HBRgZzM& 7Aa9UeK/ΜICFazL>oQtYӖw+x[8'Np%jDFlEuH0[zg?ȟVqӝw_0( Ut6Ow}cɎ[SrN[N@96n۷mcɧҮ{wիs}rjYk+~UФ qqrn޸v-ZطC[AԦm=̚<9{کS'n.{cˋN X<tچ:WTmlQҿ?o[k6 \LL-[ִ)593e {[^^s4*$^2,ooN>=G˺ucԤ$]ΦAd$|37uùzeHbI0Eҥ*Əbh(̝kO^^y!or\2ڵcaksNۂYضgM9q/|xm ̝;s:&IO9; 4*P!s|jXi:8jU ~d&Ey7:`2Ҙ:z4zAL0V߷/7cߏj(xuÌʫP!80I}kS={@8y-5,\z:Mڴo(Eݪu6EXx#--_zrVyM!EXfޘ={p]wѮkW«U#x ,#.[8 ⓯bkL 4e@$8t50˹RdY)_k׸p'N`^bUS jl]( |҅[;?w['@:uxn xI!gedqJf O?h$[L f싅 |jKo[h)m^4 t1Z+ep}q__^Ӝ疶Ջ9&[UUCώ촰!+G*##Bl5ij'dܽyY8Q[yu>n2{,h%'o2KҶ9r"FUE ǖ'SV-Zjʩ6u F#'/]`r2 <γiiX +T`mXs#c0HHIǎD5n!*{<%;{S-;Ʀ8f̚… \ȝWQu*XUJXժ-ټ?'{/u>Ԭɤٲ?oϘ˗3z@z\۩]'z͐c?v9S_gΰyj7йY3;t?gdh WZTTf~;ԞW'J6jĶ lu(6,?-&__|8ŋ{|LAJ-sC6ν5ܴ)K>ʄѣiکnkc/{lڲڵ߈ fSOԩ:iQQtkӆ[:峮^e֭t߀iVuX2hDX+mNLC¸{@>'vgnc3WGpH=zӰQ#z<|XԻwoQfU^J;y_߾V`Ԁ9qbEz]̗umܳs'} =6e3ɶ={X,S/J3U{iU\By??jժ9r#3z4l[,9D-1XG51m<xZ5C&$/Ӫ~}Eڴ xYG5WVU\ł+=t(Q| hgmٴn-[ڷ+hN;Ś/sլ]b3GxhT>?I3g:ԋ}i[C7ڵ#.> rnll,:t7lȕ6kd$hlM޷][W_ѡG4l+5ܠ un>iܶ5KÏ?bT!!n֌J2׭=_K4{6A<,]П/\Δfw߱Wxw];lElhҩrύx!]!Ʊq&zvJj^F |F{4}DŽw1ºa39L1cX8w.+ud-Q-[ұY3bccqml޲zsTm7yy__@vV6'O&5%\xJz3͛G=8{,j~r{诳~Çg6+TnӦo:w&%^^^ӲukZYWE륭mYIHJLĒ""#tD֭dNӦ<6nlÇjQuxL&C-3TgaD5hXP46s&u-AF&ѐ?(dRwE=ݛcǎq(>ޡJ8t_}fu/;+\y^}C l׎%_}EΝi­s>bnԮX3Omι*m&ݺcYYKH`}|<}4wAiS͛T-?ΜY9s&+;ulkDA>c~ܳi-w*:Vt˗/& :Ĵ3ݴpΧS9sr~&NF,xu^{<\@:jpNuxnvÊ|ϴjՊ&G1C-f3sgɓ,zMœ4&՝73r&^x1͗,.N:uxrv_O[nqx+xhp} mڔCdZl;tɏ?Vf΁U;#rh^0Uqc&W_%DQհfah˗uj}E)gv4[7GM׼97缟H{98OëVe#i|a{򺾁T[Zz2.ZDUi%Cw 07T^ dlc3}dœ#-%_|:U2elL&L-:֒%ھ|#D!C+sr1Xrw؟כӧw`?Ӿqc:Hs:F#?).\nJmHI]'2{w(bJ{|3Xt)];t ,""[f{`q9Gֵk9>+Vd}:m33Ck#jчЩS,~5V5!nߦ mƧo|3|7Uݺ[=t@Vزy3[lwMS3L?zPF rmpr+_SxǏa-K.l#fftpQs]-WFz 4|8# ï&-#C!P!WZL&Si}ϴ͜Ip^ .'&[{TB᧟:W}׭ڌ}$ֽL&g# |_om1T:3}}[Mf <4L>qբ1pH7i 6@Zv6 \IMekۖ||Wi pi|bLqϟ=9Nֱz͛ӵU+~޼w-jL&GU'``ɓuB}GVzzRn]"6q>/8U}˗IEJ[[oŐ!l"u/=zp,> :`Z @uOV `4ݿ=m!av.P_f͝wz4!qؚoָ1q[ukGC5?t^NVz:/=^F#ϼ֩hgܖ-״j֭ۖ] Egdyw M6lܼ4f͘r!{;se&M:NYES}i gO:&;ziNJ\[naܰal]Qu@^x{R|}y1: !{T=f˗ϚEܮ]:vLll5BrM5yxx&=̝ <&u_͗LJnsWZաyH8xз룚7gmBڵtsڶUH_>NhK;FFF︃ÆѸ];8uݬ~L?QtϱUiШZWՄ/xpHGFrjiY}7UsvzYccUzM=zPE#S}a߀yN b48 66t(t6Eihm}=q2bL6jՊ̡uι97򺭬0~}gO֮>!7(j$fO@Mw J1dQEF)Vm[pZT(Bk i"IQ٪8 B Qa4AUn]lmݺ/zg\.$3#O?6ex{ӸA{/ 2z4k rm\ Y #///B˜ ~{/6*tʈΜɩ9-P<=5#кg=ܥ 僂a St4|}=SHR22t?wEUɼv 7jDpN‡K:ZPs6+TQ (Qћ+,$<~-ϴ98t_}k5X@YaGSJYtX+֯ [u\壏z|+ n^HH|ʶ= # ]pnHƍ N3X9a5koU Gvf&gN]ą{%~*X[nطn4[ T֕C""ؾ|9U :mvV {P:DZj*/(qgX dNJTQ#ϕ TU%oiR`(I၍(aڵ[,bH;twT(¸pڴ)믦GeOҩSYW`7zyu:[3BދX:W> 4S8]tɾl.r8ϟ=ʑq N_PG=m/aۦ\92ҊT>Dm%?ϳIOXZ9]HpjU*@<'Ŧ+QUP(n^ l{]ڸ쌌B)fM*GEРgON͟EH}O@ # 6Eհa#F;&Hs*TN| @Oڵ( nɾh"{~ҸSlNnsCQBk`=|7o^f3!5j+sZ9sFl/5ݛ~H0sv6Q]( Upj"-("^ ͊9s(JbbEQ^xqҖEΝQJ8sgfXF{ťbqMsv6՚5FnUTFcfP_( 57!q ZUu8޾_( ՋЛ'$"WrDuzbA*9+(;^^En fX!Djڿ 'Q#"oA*o醄sxͷ|y ~CI)A!J<rd4G~B!DA`S~DӉ E( |ŊHExFTc$Did߸J[O++-R՞5khݚ0wGTbBjTTǏo(iyOm`eJ|G*.s"n$C£|(oTTK? !D![Q"B$ARQB!|1HB ARQ-GU! I !Gj$Bx+9wGQAPIq՜bj)X!\ʧP4| B`Q L x:lxg`2H !_!QB 2H* !DbJBdK$U!B3Hj$OzThdBxARQB!_! RQ-ZIB B #\ !`$A ~ң*E#C32H*B!>iB 2H* !D_! RQ-LTvw$DNSWw4RUp(;lxu[^!sRQO񟑨j~E4j^d *$?.m-.;"x%'suT+UdDIQ^U3}EQiP~&d;.hٯ;&!$$Dќ즪twLBS)˔bJ4wܾ 3E'dDqh*i[Q$1D^tw n>0ݱe%smg,J*:B ျ#Qӥ,[ʥqB]|Jdji3Fo BQ"cE)';)" !:,*%D*eTTK;QB"Qy/+U!(!RQ-*Bɯ*#!(RQ-ZIB%RB1B2NzT$ !zkBQ2Z&IEU!(`$%Boq;ɏK%vRQB"B!GLBG !gŔ$vң*E"CCHj$U!B%9Qң*%C*eTTK;QB)U!(!CeZEʴTTBң*DzU˜|>qc!*8XSPc:^B*]7@%1Q4;Üip%<Lrw, r?PUry_h%1&-pw,DIxݱkX`roEogWT; 2!Jz24KTC RQ-+J^$*CB!Bx !B!(RQB!BQ*B!£HEU!B!GB!B"U!B!E*B!B!B"wkn%292\KΊCٻ8㟙RTEib{EuW[u]emV,wł" e)J&? sNIW^07'7'ɹ9%IX9/jRi-p40/=!Q /(8,d^D$Nw V'/؊'`unNzs!~l밺0bwʈ*S`ן? |xlZ!Re1 9sv2:klVDų8a[(5`ÞR =r^ `+.$ЉODJgd4<66|,XCWI|m; ,&Q<l+똷 h \7Y \;k\\wrT<'q{SǑ]%@y X[ IDAT_&Տ}Y܉ʩV3r<*c!_6a6oB.?Pa=zc7oƽXI.@㎚]دMs>]o\߻ Ll+ؼQ}#$l:c+N4':܄X`=,ؗUskwzja3n8JRm+}IsAj-l9n?Ɇw%E)m;q,Q@iqf:Wj祂+ﹸ[$Z޻ Vmz_\/HF(rԵAiL(ݕ$^+Z-G9k\U9*Y2[-x1R[& TI䶫+|gFɦplx-G/w~Xulm ǻgU⟗D$5\k8|N^9~d[1z&y`eb$CDR4ؠw)V}OpϧF>w"v݀tp IM@,"KO}5q/T\b^MImf]Ġ] +s-/\^U^R> HɹO}5/ԏ9vdpS-`-~f؆"l.!_b =c{z=B\/l:} Jl㎴]?<Asyo[,0\OեUU1n[(Y[j߲t?[8Ǒn(C"ʋklCN`We8>r[!m$d^7Z |H=]0۟.VlErf𮲵*=aeX9vs9; }6cV=N.ނOd6F7+VYFSQcF0q'b;{`)|dr^M4RjYx/z,`.\C%Kz+ϡRMf'c{ 5('F< J vu)а<I`u4lFXv U޺9}C%/XϮK_.\I U.ZZ^3HފbבKWV.oÀނ4lmTTZEe\|OyeU6䕿`籠+cekoy;{AsOm͎q~% ;QmVZwaM͐$;ϑeGGc I=GDj2-8HtgFZ."lW5|ijbP95NNp>lt,gt~]Gӱ3da`ݰlœ:8Nb*GaÞʶ(-kͣ[w162 V>=}AnZitFa&`]bp|IUH; 9Q=`ѯX+Vcowo<Wu H+{V QtXu#7_P#?w{qJvݛQh+SSKlwUA@0/ϟ ,Hw'NGȼ,(LNNp-ͅ9;;"Rw8+uZ zG"I2Y_<N^D$اHqHFQ*""""""Ed""""""QHFQ*""""""Ed""""""QHFQ*""""""Ed""""""QHFQ*""""""J1|zP"" %9x6UW(RQ-nHL]eHL:iY%1;"rmWD";%''j:Y:i#t$rr,YsKޣ*J. '@dΦHN_;ΜyD2""ɔn.9; Ff'2{tgS@j9r5kn!rzDDrkc/XS-0G""O]}oqK)eHNs"ɇHESD$TWZ TEDDDDD$(Pvj)""駲R)'*?EDNuJKjӗ[DtT~JKdNP""SD$TWZ T""IP~I>e+-""""""Qf;BOSY\i)PvrN15""RvVZ T""SD$TWZ TEDDDDD$(Pvj)""駲R)""駲R*""""""EjS+HD9*?EDOuJKjӗ[DtT~JKjӗ[DtT~JKdNP""SD$TWZ T""""RQ.[i)P@5۩JDTJKjӗ[DTJKjӗ[DTJKdNP""IP I.e+-N_nS*"Vj0HFQ "Rz*CEDKp@5-"Rz*CEDKp@5-"< XI2eU WNZ TzTEDJOeHzf;}EDJM"겕U( TZDDJOeHzf;}EDJOeHzVvrHQH$*C'"TQf;}EDJOeHzf;}EDJOeHz;\-|r}]ҝE^SvF߮˨HR-nNAld^Ρ$Y.2X=6~;tgADR+8Ow6$ ЧIh>t"] \Θ.E0F<\Tn%ˆT;̀n+mD<@5OAoҝ ԙMjOuYt+A]V)HFQ*""""""Ed""""""QHFQ*""""""Ed""""""QHFQ*""""""Ed""""""QHFQ*""""""Ed""""""QHFQ*""""""Ed""""""Q;R~{hdycҐls؋@4E2F^ H? 8X; (e_o|6\SC;A~|=)@سXWqWcbٲTH.V  j5E$;| H?8wVUAbS-c7\Ӂv=ؿ'IS:`؊SJR6 (HMm\68b$KD[Bl@u9/Q(HMU$.k3=?\ \C$.Bž;ϣ 5eR.{ p30(!}sT' R:CR&oadKpWcRq,LwFD R8?R6obi·쐞@i.X. ,N*S"(㱿a# $Ԡm')ɑIk3$ yҬNze%R~ h}`L*X06vUn:H- |{^{;`na߁W-!amƾ3) V`6sRrVVgZ)ˀXPH@)ο+;HOQyb"j"A[޺cOG %6񼑅aVWcAo"3 վwb ̫T oEӱXe.Vv%b#@SVᮅ]ow> ?)QGD$׷آepy['Z+ZC<&;4xη'V?yS y벽.-b}r[m-uR76u-񸿌=nwpVnMtغ(vچXY6w:زb)ߗ.{8V.x9#q'G$Z0"|Fڎ ¢]A#wݏ$r8yޑǿ ©qQoyOS{KK.<ϙ3u~ 8^K" ;9{#D3a#;ҞK|irQ9qjCY ?ķ'%@HlW-e+-ؿɱD8n y׈P/{]"s3uNv_.Eo/{xJ"#"@5[NlOɾtO;y7! >KY~'vs =vOwYkDe yؿ^D+u ‚G;NƑ1s<)N|tFN!O.."ڀxs@~HёB_֎43q>N!>#kRGorKir^stw⽅vXYXK:= ZuG݃2\RVt  t rM:XJX8$c={`C㵪{my7P|z/L=E`*K~?bW.V:`_72^f)G^'@?ˑNr[)6ڈXp>6Wg>06:,Kd%6'컾67Uh)VQj]]>8!pX- l(^mh; EHvm󗃣+ǶjpULFb@!^?aei+OR|ρXeYܕ<v=IŸ;\>!vM1js0u&^E벇`uY#sL!6"[cC{tu<;PC.<vx]v6s`eX{&{>^yMc]p1p+V%: -ņzߘ i__n=Ϯ9ؐ,ssї4a/jl7]SGT nÆ 8U#vk/DF5tP4hy/F*@_ᷫ=VbC#D8ۑ~o_29^XC 9KGc|ijW&މ7|/=# IDAT w^O=Mjբ0=[xPzz<_9CQ U| x?:Η&j{[bqu H#iYFoȗ_]#_@5j: u}ؗVx躹#\u4:Ҍx i9ȑհ{t}UHW{36{pNv#'Wa kIn%Tl :y"J.'Q[9[ٵ Q Sa_,VL8-!3N○ 7;>;w9ύS9[9u#x`Vs5gJlGğf៹*.!˰ڍ'>H b燃y˰t`C+VC #̜thMI4?3E-8*g!v?p5n<]a1dV#: ?`l(HDalx\pG9a'PT.*eov[86+20s]]V8M{w|6lXuz sP| G(*_x=VMMyFql }~'Bˎkѓx[`+F`-m@uXIĮE$-W#]?Ǖ QY tJ*haDemYv컝@CQ9ۊD׶ۈ:XzL`L9UwHy[|baa(ecyO~󰎴.Owc&*/x [tbwHcC`ߦ&:d Wb-a &/q\|~,[ÚNJÊ>Xk<DZMl #qO{1}ER%tWOloYMle1Ѱ,g!qY*Gzw{M@&Aj m7t+qKr^.@"Uá_$v:o5w`Xc2a|JB?`-u^ǐX*萠\CQ1Ń\{ڀҟRw,O}`#|4`nLݾGlˎ|;քs=>N¶sJ9I^{lӗb zte79SႚLf)/`=^}~"3 luDWauω]BXu$SBR 5zƾ஡a/~佁{ϟ\V`7N&6_2a\ !˶b- Nnhj߿~ IJʓK.VFxWlBSi&Wl+ֳՏ2A>pz{4u5 b{D`oDܭa;֧0xC)~rIXV؟]P@b `=XAfaDz|%r2Q֫鯣eE='z~ކ5l6'v,6aT&&Į2sg/@b_DtD lqjñ!b%&qO\s3Eg:lѝE lA.Kb;cC{ƽuĎ뿁F`JGo)|l@@bCV_mS$rl]fEsVà. 6w(Rn3HtL\*{U_.C9g(zO(B ޟb m\C`=ԕ` "xQ-$Va ܈^b5beߓu=P< X$N[n62":RSr^S|Jbn,:bG܄].>x|iP} 67#xIO =:q>D}Bڊ!*FQ۱EP TFӼ9 Įs,[fz z^9\3ar)?o]>*Ho]CH=LyX˿X%g"N!ޡ{~î{ ~5\Xu$CmLKD{¶ty3bWc痞ch5gQG^- vϱN=[ _MQkkhDl~k3R1k_ jaz8[e2jbK ?!=|٨-v!HoXFl ulCuc=A-Ͱ/."钃}/\‚Ȅ-^~;C/æN].Z|섙?w_α: ^݆5\w߱,Iب,bW`ZQc屫R m o:e6b Uam? j6}ͻz>Vks Vq@FX)8ya8_s<4m]^Z~i Gꏵ^&j$b%N`<\mёE𱖅b}36k{8kRU?"̊r`yyKgdkoܕ[l7Ѿu`":b/<:S4\zZbO"b|h[dawr-Gjxޮi]erj=E">ʒu"? Öݷu/xz`k*>_`r:+7®~[9ꨁ]+"kpxw<*Q[O@Wu-:fq.V޾LQ]vʣw?b0ǩ62߉$܅-t.+V.{&s2Kw]6u(_v\Eנe]]gw|\<;=rrj*R% d.uM= |2D¶+"yYאs5r$0aM3%"3Cw†pw\XskEN+aZH$2=eyJ6`~ka\ITa[x%yv\"^*EĽʛDbX_T`xf23//'s9wJ7gQ5jcѺkXa9SNa5٩I'l{`݂hbt܏}ZKyuګk^tm K?Hkz7M3V/2wܸ}GW|\ѵՇ~-Pݼ}3-j*PU]6#5Pڼ}3-s*ίMju!:rru߫뫻g ϻjӪPmY0Mf]wqZӸ>7/й3+4oke0>|)|1| lR{%ѣaܸįwIvf⏟z*۶o0q"|EޫTjK/e㶍;~Y&uեZ^5GpD#L>/29GD~yŖ[QI!0hSUZ9t)LlGYwmжaC{ͧט3&+ ֮rj|xMX"{M?Z;Dظa׮eʕ~G>=Vxo</'#aW]Ő`5ۣ[*3F+/C'ږ[Xnюu:pMkU=nشmS޼:T0u=#H[ӻNܬVڵ.v]χ?Ä u=<oN8N9<خW.۷âE>+{@2"Pa~,vh]S/z'u:Uϕܩ'uسBiH$Zۼ}3Wg1V? gi@z޾=7`_}a6E_ԯWsx"6PҥD o ^*Ǟ͛3,v,hr(#IF٭n|63Nz$^Zsc6X.kPn4SNa{ѰFCx/M}\^B{ :t}E wjPzl46Tj`$wz)׺uCp5~O}&hظ16f`-|s̟3'0}jӯvݺ r;ݨiSqF֨UkU+Q$L^29^z}iNr:'y2Usry٪'%8 k_:@֫<o׉UeKVo?5+=yLT_=G9oUjp>?f&x,j4`L\4[Ulqƌ>Ujps![E(Z>e]-,UV,X %(8/*4?/|oqu~_»eż;]tpA׳QЍw֬K^қ*W%뷮`iVY_K.Vr=ytssrmXOLfk3_Si2vH(|UvJ^^/ԽKqi$RDG߀#Kh]v=mmXcѼ?'WqLyvب .;f\fksBWpI* 2.[Q[eׇFқrVթ˦rJ5F^N^`ڪU^z{<úOM~j7M_eRXY]ηcҢp{u`Ew}[.7'7T9U%7xF~^>C:?wܱ:*% FgOΔןFv۳$RX1cǖ0{5+ȼye˞אhG \mmeiSZ!> 3G5*ByѠyrhXaOϟ|sֺnp>ӒxҎ㛷o)sW_E*2f̎o"W|3UVġCa„A*Ҥfrsri\1 .t{yċտ>7ܣ#j-Vl.[x縨E߹?maj\[N88fT$n`V[.Pvdqղۦ/5 J1Pn"lpնX^k9~ IDAT؜Y2c!|0v?Y)Gr*[D[l^_/Xtqc𞃹xšb LpE0vYï^=+uVQڿUq%PPP?>{S5?f-ZnW3iٺus>zݬ^ؠE xF+V 84חeճUO:52$:IOrQngO=5}[kᇶ}̙Q r=`x- X |’ƌa1PV-?ᄸ+Vt+1[lcW~~ѺlAg~[h",=əOo$BvZ/h5k!v>ָ9v,Gzym_o{mG"2PmTwb~\#g|<i0~;Ӎ4n wo|1?nYOओSǶnKsyJIjִޕH*Y6\Bۋ~RSC1ZEhĈ#*WF?opFGudW_ϼ^, ݷ{go5-7!*dACu يItxVXAF4tUI5 ׈ɋ'QJ NT>aѓFss9|<7NY ۚOv*zNv |˫KpǒrTiV=cI6`jyzȭtk-r1ޭWothV&V<<zJ{%Ɂf`K,FSS7jݚw{`Gw8:Ya9j4O>thԁ+f$-O^.r ö/s≰>йs񅘒 ʟ5jXivV:H<;4d{G)DN]n;lݶu=Uݛ6ԯ'WJ :6ͫ .O: n-0T VT*b_n]e۴4K/Y6T|՜hVb8UYv Aw;`Ή 4K]RyMd};SYD`b9<y}_x!z}Šnk+A'@*\Pm8|xMR֬W_k׶o m[aq%eU73'vmХ l^e?*f-[6[o)(ZSǽ2g-O< ᆤG*~;j귯6[=is!k6IJ2Gtn -o?zM Z=Uu%Kv];x69KT@ޗ3NkD6ouސҽyÅDRULhnCօ#+JasxӕuK;@sJۦ/d[[,Y*[ lg +;{5݋F51Pɾ`֭Ŗ~œӟbhYSM^)~ڵzjZN͞ n:uM?Kd 6ل}mNA:?j`|4磄Yu=O:cKC$[xW3˵MjMn/ ¡tZrpqV.{ͣwJ214&raD T.- ƻHk@[A g,+6nٗ]nQt)"JIQ"8tU)5sAqQˎ|ksn5E{K6Wu?{4ރi˧{*,o]}?lQ4eLBkĺu6,ճz4d!#sxw\% kӶ^[za۱훹 ysF~=Z xK7k@Zձk|q,u> ٿ~ޗ٥A?wg+-z(m۬sg˺6/H6য়ʞ7Y?[]wf՚ԫV-s.{w<6qD}ظ-wLJz/c"JPϟz_1qΕӛj*O&zW b[^h 3]^Vgv Ҥys=f M {3&|IR^+w*`z7~J~pQk "ORիVYmS [nپ]kO7ڷ御:f_cK;M=[)/`[O20\t;eͩpz,w=)SҷSޣ컯{m)YȺlF{4ރqCMS)`zoT\*{O'9_7=mXFMu(9ܻ;Ӗ~6ؼنBk t|la~`ר~{__ݞGTz|}4- pwӒN 77|bѺEV93&v[*OMy^9-k\Ӧsp=m=˩wޱ={1ۼQN W_0Ɨ~!W"`AƼ '']wߝԳޱUospaIy螨M }yzG|9KШC{c{eYޣŨ> Fېbo]6(݂35^㼎9Pk_{fL0$[>`B`#mg5mU96h`F]w]CY}ݲ/dD~zVoZ͊+x2Mi%\Ԩhı`%)VE=.UV u f/*&M`pWom̙3Ѫ{,}mprkG"4p.y;>켳ryмp;W^C,~pqE?{ݻ_I$mXy k6aΪ9L\8;3 z]ZT(YKS_##?/G8լcJ6_Zw;ѱyG25k4:o*ֲn-{NY3wͽ: oszɓaLͭe"?7_Uw+M[w7ވz&j-U!}{}{_]vqlP;4}{ްZϛr4n2 |+ݍ r)Nys@WjzIEbXk8'+=`Tˆ #1aDj .DᚏᚏյK.[[^dE~dgDOȂp~sGtbI澢1kslj;mmI3bMlzgkVwԆԥ tr\XUEիVɻrǓ۫zE=ݪ{j45҆5Ũkwzl2ڴ˹#گ kT!ޞ_Uc7<9'6FSOU-{rKk٪6FS索"Sf5v(L4)A` L017z!'W'!aaa?Q5 0 0 0 sT 0 0 0U0 0 0 #0G0 0 0 H*Q5 0 0 0 sT 0 0 0U0 0 0 #0G0 0zE2Daa&mVِ%0T'Þ#FP̵k~= yD[aEiKh+B0h[͋7ް[Fp(^|~}o$![&ڈltZ W0}-ЦzþP$3N/Cz{SoFꍛz2V|o *ZL;| YW.JUla.} hz.}_ ,yӊjeo _Sq0eGx0XQM{SY[&FʠS(ӠE?ʚ>\yHN{Ih|n;Jm'52&S'aԚq_ nSw;p]kj|;p ұJ ceac3Ma*^ ;ʞqUgpͭ3y;&ed{"84w :Dw ·vAWlFFYJW^ M=FeVn^;ο W]u2/FBh76ߦYP<1 {$Ξ9I@YS58uk |:|baI˳Buybdȷog‚nN 1Ũ%KFۺ/cSx%xho #H9tx Zo/φ9'š˨9_ۆ k9QM"3NuQr6ݞ˨í80/zn?[GHo_wLF0|x[/cOx -4Fܑ2^ uir^w\o{x1G5: &/_I'&/K@~l3-8K"K@xph'KIzhw1jǰ{˹֤L$ #K|t3} quԋuM"^$D3`)P'nz >E)P寭{0qw/h*0%YpHE޻/?NI 8 shaTF:h:^9%T>7] ôm7sT[3`yȾjr2jp곰$x~Q8E'yȹhv3!$1gQ;8uRQ)xK/H &{60O*dwpNji)e.:̲=81G5)o ]~ ]*dM}Fy >ٟY< SCEq7HwEQf)Yx#yPOH.?Ū >@{3`H-29분9'G݌]T/tF|1G5EX*c,&/+=q&಻ݍ7B˕q7Hqr< _T 70X@ Ƌ߾S-͒ȓ׏L= sTSf0{? =VÖc8Ȱ0Ca`KPr%09paTvd'ezz9HYFD | X@Ȟ@tê\7K5Rh[.'q4tQMA 3p9AZ[?MFɇ =85rLEp׿]HA#iVm+^K9tEŅ+fv> 7FjFL'\3?ƉbǞ9u/^)lgSJQ6lT| 6^]_aelRLxL{=}-g'&0cӋ͝Ңv'pBm@o X%pZ=0S] e{~D4Rrx"oW$&F`~a=rhsX/MyFY s+(nT^$~`74 GLMy$NT ϭˌaj3<EY04[? YFX'qˑ x#i$5Z6;UM}>;o<bO0җgiJ[ ˢtdĆv\n%sTӄ,f}%}2dUfI+bx<(͌)ڟy#م4R MOFos \>@{ `,p;z04wKMF*fusW}Hۆ 8am?6۴+wJMFk'fc4)5^wl][/('3$$ooS$  3; |Z Cvż(txZ܁:'eFY^I] #.OM7M1br`M|:o{/M[k34s/ecQsBk~G&Kn?x߰`0"Euq|MyμΟn—'t0igpl|9j:Jaa-ٳ٬EM,ooeF'A@8/Iw{&ڂw IDATݪ)öwӢWҖz֏7/5`;EmoĞ c%t]3M!r)mVyy&?]YuTq+װ?T6dp0Ք~mJʑx ف۾+KUF,˼@_ɪd?b-C$ x82bǥ~ZhK vU\Yk4?Ͷ^b >: , ~g{Zlc OUYoC@3X̼6<`3h}i>-?я=1?.ڐ̀tD.Eec}m_1ꃗM\kdfp ~D:ÜTt[7I-Y҃90'5+}NJZoJEF4Qm@+3FNd闗øtHazdQWENdGpmVGوR`޷'^wE$4ApnU*7rcѫ:3| XhѰl1.Cdk`C_P{,3bMo`0OnFE,2*6a2)]ّt:3^2;1@ƙa9ZEqgu?leJ4RF ~Ydl sT(eݓY}ͷ{neϗ\i Q܈S ="="V:MѲ,Okc-8YDEETvҐ`o]_%#km1?@^-88ew`"xa4,@QYSeP*12#\sYNe<TrhQ5X)̸gLѷO^4>>B#P]GhI_"&"jQ,+ͺ%٫h Ȩ/>øpn=,a3/ 0tIbAd6_k_`=2ZjƝ\fJHEFUjP+op|&];G#d3Tg:M(`"!M\3}7idQ<ȅ@ގ2H<Ͷ-:l򯆲'tӍ>|S^&)AU73siRN z^dw 7)TTO…e'E<8߶;GV ƨ_^&ޡ8%1B^c48$XȈFE p'vh3sTe+@o?%{wl>2NBH`\`F@1"DjV/ViF{PHmʀ<_j_~l1@>i`6k4dT4,wS 3K"\ȃa'Ɏ9FT4oߣ?d?IQ9}#F T&EȵD ŝfQXN3ja$@+3Y,f+]7! t#SsTMfzf3+~<5YGt'|,3 wp'.i LCDLq"橶v<٭g'#vL> 7`)!XKa#m]M}mZ֘yJUFC~ZYj\jIƈ dD (p3#ML+WAQ[:$ne7,81YɈ dAD5*F=BIs( ?Lf X%ڒqk8G7j]{/ٴd]V0i@T~eFl|LIt]foTϖj:#J`4A%X<]_"rag%#.]8m=UHKTX"+T: W|u@ra$9 xz88_*#!\>r`/T'VrXsU'C" B9,VqEux^ @D~ dcT&߻|{o }84Z\yUud-;"=pU~NNQF2Z8 5Cza,S \Z ,׷cq#yy@.dcQ5Jn)NSoE6.'okܩFxqxFUM`)Up7JfD;*ǝ4݈4-Ij;Wr#n ̸qCvm}#HDvéW%>ϥʹ|`~''j*"/1DFE4SNd60.uOji>sU#)-q;rG`tƕ8@;+q'GE3݁ۀC+q fW!"_fU5(XV-=Me@qCt~çbszzG%V~S^#*|#B^j4e r1N˧@_*@}9o~9MjkЯ8c6Xܦo@WOxTLEF=aFRI 2bsUNUw"4W?+`o IDz@u"N'D'1 ۨ)@8_TpZ1nL IdTWAg2""D$ :pyꡪz~U LM@Q%CF$ z?*F|L AN1rR[tRgZ[i9FPq;B_B {BD$ig 3 Uֆ",mR5UOdQ=ׄ""ŝN F+TSUM9Q*U8b@ۅD3ҀІP`:.bPWeFm`Y&@6>/U 8)F =pyu$$~_#)-qE8uSCY7IHQr^.Q5uՈ#N,k0y-hjH"""p+g",hb4g(qJS=M75_2 "`\4ցM`SDN)&QW6g?!ֈ "11 YSN5غ?@:㈬%3phq?h (Y4NT(O'đ- [(T5.Wq<]gɈ7>OBmh`W?lIDD j1nf@mH/` 8~' xNU8"%"#qg9B0L{. x9m0"܃u _'';><cNȉTHqtmQ'!i<[(oJRQp`o~؎bkj w ./sIQqψDSN+u"K-nWm1Uw:0s3pG(hQLV83xܧ "< FņJuˁ1?0@v9Hq@|]Z4 ܍~ 0LjIOQa[{09PW5FyxXDN§?qZ':ɹx=6'gvNUʼuå Tv{Tmm)>T>$pbq~ DdG@T+SQ7sT(Ozv_ [(PWz7GO>TDup'Ot[D^FdxWD;Q "RY M =訧exOJGUDvOg9TXAH<^ d졪?G{T\h|Ӏ_= 1R U]3Tz)mWr^*ί]Mh#-yV|4sT(Ogjyr,Pn-MͣS_l&NeTgN{z9^SM9zZDk8[WbK\ \I"?T^'U.5 'Eۀ#TuIHaOT2up%L. ~LFf9ln躱8_/+ηF?.C-$!`iyQ{Yʎ{Pյ<͛BTF7TE P TbS*p*f] h;&X0HK9X{݅SU/DH`"=e%0@UQհ``qOo)8q`("6GCq3fT`XqYe9"_|ц9FJS?&ͮB$rU-W+pIP3"R?# QCqB:AY#r n*P:W~jB0F)|>(D\("껎_fp!:X2H0w\-W=co|beKP<x FQ0q0aE2]M9\3 88_GV#+9f=ί@0GHyt}Q^=n {?PD\AU/tK{ AO<_ U/OH0nS|Q ejzADkE7'ܮ@GܩE[zyN6zf"?Ct^9 -jsB9*5Wybt* D_ee8@U}G˾W5@qCu̥gw>^clSwAU} *KST(NPS)cmN='Dj "qeV#q n:H37I88_b ׼8.l4 DՈ+"ҹ2jDQ>;](JϹҧUuNfZO,4\68h5{[9:Jfrz0Ƚ)iV ON2xOUFUN[b養LIWz&I G ߴtRr12"]_d$qucpRGuпImA:8МT#*"@6LQ5⊪._E$_DZRp!I IDAT]r ;Tu6YP1 5ގsJo9]  [׼^\ [cvloDᚢ<բ< <)Н˙.PzF>T`fk[$AuNX'V/'qϏQnR 8pɐ6:Wn w yOTlZ}dmH!"8@W)oU=YU*/ɪZm|Z:\]'` B`8VYgKHx@:-"gQ.չJFTh?qG] 0 RyDD{8`aOhelTud+HtHT:4Cy 4# ]NDZj-N!+.5L#\L/=kZ NH.yPmXnZUJeĕeywzlUKґpa^NąPI@1*J \uſr d+\ՠ=p@qΎ Bգ~-#pMmFU#!l,"2-Tz"fyMEy(O…Έr;s #g03P=MR_y,ιx^FU*B .WUEUSU_o'Ov՜7VtNdȭDR}쮪V1#Qzk6{T&9&܂ s. mVINl o \9qQmE&%S9FPձD.tvEd4}t<4Im}pBKD:lq!FC@u N棉te>h@D9 `Þ˃!5}}Gց1ŞDUS:C)Sq'W1GMնDu%N\iUg)1F0O:US NR t^+7aK.QmgoP;uTI9F`hNDžϿ" |5Ey(Owy[ʘB)|ꋸ0ȿbb~!o-#r+vTo$&*6_kd uDJ9O"qϢ] TlU]_9~҆|"NN"gIũgD$^.|9wj!?q~gqP(# jſb4PQ5v?~69NZ6\S-e\HQO&J@h"RYM#]Qսq"!AZ#r rN&R7ܿ~"ҫd/ D$@U+̋v`p'TB (pv>TD'NFT tVUfH+&_L]9Nlq,pMop*Fzc'FRvy(.4Y9$Z#`7"CWkbnw pgy" {2 >S_!:L`"czMӵ0J-l Ef4QI{Na'5ĝo4OQ᤮7'5*C :FGZW9RWA'5NjЖ7] eNj3 0ԜT#9F2QYkʩxgoH-[i|[(WJQ 5d52Py ,ʨ\폈'˞_ba]~`0wP9I+"{q^Pr~UeۀTlVՉi ՃCY2O(E"Oӽ8ekRwTÎmpwYkrU{6DrSE5!j4`Q5`\h5+l~>\sj.(Oǝy=X?۾p5\XſnF6\DWx /9~r|*:N kk/݀}=.I":LNƽϢBr|-SD ]Ջ+!t~ќˈcr-q,5"@nťx)#U]/"j8tsPWr5n-g1GH*B>j01.d𼪾*Q?1Ey:W(ݽ c6\8վQ ե^;a}^ |Ȼ -Stpi j@7Ki,"q'AفqQ\sLh|C/j>sr$:Y֎k!:fliyWZ|}6 >Ω-F6, ބDc$/I0 U| nA-'"]ciKQ*sq -|'=cy#MPEg1D'K~~Hj^5{b["Q55ՏO$@!OU.9=mqNTaO$PI=ս0@F/ t--wCD1ʁKUPćPmzu~ïa9FT4tN7.E1i~\QWGӭ}+}4COTOÅe"MAXBꀞNEu{SMxXyWSQV=;е 8QUQ>oB3pwpaӠZsB$xjgoS MWp^>&? vLǫղH7.Ey.a1GHf.~P1@.pnWP$"/ԡfdEy(OƅO7GeBiyaxPpD -ȈDnQs p\] h lpoR9"2 Ve*8uT|FJho75 4SC|3"G'$$ri Uw3܆7x>6q:#'#1GHZTuN!r%PCkU&(5Cs3p;ȓDC,"͉Ey:5fbv?ʞ~DEKy0@Mwyk U?S""MEY\xc@[k%k=ܳzฐұQTWQdUu{k~i]ozi A]u6ҒBJ HQ5U?aV|yJD6uVi1л|=7)8m _S%#PꞸߢh42 8 )5(4^xsk":PEnQQk kN埍p7TʍX'/ xSR5iS59o<: wWU{^|S= ΨF:ZoR'M`]0W2nރp<!"wH?iQ~G^%K*fˍS+8A3qR)NE\D59r3̀7BCdUoڅ 4MmED'8ux43:+MO%bʴGC"OVƄWڱRLtTs d<S *VDڈ7=]Y|}V=hkU#U4}TG5nPѸӝp$m\˿Y*"_!ǵl"rn!~L=3gw70)PT3 jO޿Àh -8ѥNMjq+X;o[WY%f{^kcSm پ O~ŅF)1T-ZE+)Ed*5 Q3TJ?pNj@8u߈H0")g]M&JdUP-60ˆE/$I@vᵭqy:SZ8YilJ{"b/n鲺ܸ*ߑ6\ns{LV]&"-$U-S|kQJ(dI\ /g(}ίŭW } tШ " zF5 Dr*U5_N nZ*? \ Rh!FCB3;n/3À #;Q5RU95v>N*.ŕD p-wqF(B9&JaDGu5B h> 2wQ[hi+PiM?-}kjCmotRˀR\k'9FvjȫVBUq|w B` ۍ4@ XzTK9FaH؇ЊNB}wqUǿclz ; ,!@`:z & $$"! wL!,B'tS\5?Fwlْ2srNtݱ7;Ow>]EY[z أ'&zTD~ ؙ1N97{jUU2j%rzx=չzx6LdWrD*WDN ܫq]_ىꞟkᭁzDD.Uݩk.S=*.y {]v؊i؛SU?~kJ"aR^6Msª>_4ƥLdVi:+LwCtV~έfӅgC*.X8]ǎUOF!G/P4\DչdR#_}ޥqn]pMNުKcGL?l}-ώZ5VhWD2H*+ӎV Ol`5a ;v4G~y?`zf~8]UoJ$pWxſQR]x\JE+c^DoO$`nzxEO \ 8MfV'4[i]ɮ3ఞX:a7PY Gt}Laǎ}0:%XUᾍ5{98c?'Υ,[]L_ Rdr Ed0/x`nz8iMnVq\ުGL x@UlZ5ñQ&^IZ 3v|O0f=1خ?[k$"#~]:PRDy\ e nXG 5#?`M[Un/Ώ.:uOEdJi=sS~\^NۃF)j]c/o_  !(*k Js5"[AMzp.pB)_\/ ݤyڣV-m2A{>гdzh1;pеa" X}PD"r s.j)VƊDub-2,kMZx; ovx`xמYaORݏl6ع?wijj,?#_GTOR]lBe p'|Eչ-`tl5|48`NWT:D&s^M6 v3eM|z} rWJ4iELA~I[1*=Φ=_Qur:c ӃUEqQ- v-$.@ `+q.v"r0v&oz&Ky``t>62uSde?LRo~I'y*')^k0=u9J>*tDŎ uXss}WZk>,\36J({$k z4juēNumq÷OR]2Ǝ@d4LA:I h$%Bd*T=IuWTk0٢l ~&RN?O6* Dd֯aaGKQ! IDAT4?nOBut* 2`dt<a[2``r޷\̄[82k\)V`YHh;MDԹ^_v%PBbDO,H\T?B`DQmdV>w*״DfJT${ R5_Qur 1ӫ/e_Qz""{a=azItUn̎uK`+qUF!u_kep-"KO2Yx$`KOR] N2I88P\3UT(a[4Û";J9b挈L\3(~ږ6لm_o>+pdf&g?M纈l}?F;W/_l܊َj~[0eAѳQ=.\UT)wamlnwg2뚜X OR]ک,?~:ՓX@;" TRM]I~ wRyߓT?QR,Iy\+RNwfl+6+Yx+5]USՉ{V Rugm _O#rk禞y߃эܯ2(5d,HdW~F8y_٢ -3]v<Ꚅm.zFU_{.NrȒi\\ =9DcgBW fDLAΥ{8׿;ˎbՍysWTsr:XV0!*<-)٢ zk("2DD.Ƕ~I꿀&I$3[No#po="!2_5Slj噂 $u"'.eRI)|E9W%[aJUnY]E3bil&gFުNEpkgR @18X 2\wX9u}9w97UD^騞RDsJ)+ӬEm⁹~%"aQ$[`;U=nZN_o&?UEuM`s+fFXD4US};5?p+"Cb^1M2 o=Iub*wΥU\RNǖrz *\-?EnDhl[o8 W՛{: wdhvt +Tbv,-DZ8=kb0:kk nv>-zaOwz^OTsU}Xb7FeuQ""39T>I맻ևVW UE:`ixGe_ݱ8,S;7<v95٩x}7ws]sDzE#0ka~lTnZHe0 }DK,U  ؖc>q^G5ws 2 Tdո^)Ȝ=lTh)iԹ#'*dAutJ9sn*rz%Vx$7r(k&")V+LR6W:Ihow ?7V9XK7!4v׹J_[`?]#Te 2?2LRGkyjȊخ=IuUT)@L/<-ʙ٢ EbNrFZτZl:X1W) ܌}އ=Z [YS `I`~`r^8}wQ:.` yꜛf OuE94[0)r _#.QUu'U.X[S=VUTEulMTlB D8\Q=pe01Ymd8w`V`r>1HdN7vmz̋)9D(W L{rAA9Dd;lUq`3`GU}0Zdw*w0/\1+pk, ziwTd c!9د׸U}!W52= C>Z.fkcCQ57("g9`XIjۀhŀSUGu#S1WL`=g]RlMuKHR [`-hŸ }&7чjoՃ Bds Wp/qMnDF?VʜeU'zJB:3vz$0,2,aDR+ι>W؍1ӻdNA5Y[1 uNaZ$_wv1́I| "^ͺ)/]Yyr$U=Iu5j*T=IuU\(},Q:k`(f28䵴ع虾 V9Ud1`7+Dj2Vr^z"Kam=,f۹8R>RD:J)mէc -Jg m~l,I*@{~3nU(wP;|[!p5LA΢:IإI*jS9=IuWTsu)[ TmQife;8KL^Qw+m-mc`V%Od8Vznm;mid=`j,m9%آss'`ȗ3_QuեRN'rz*:ga*%xp= "sb+Q^ьIj1M)dDu``d#ш,1׸)dp5IF}Qb&=;]0:“TWުQKT@u`sh$,a= !W2m [ksyDo[K@X9םKou5lQ6.VBwrIQU]KO&b=-| ުS )y5|7lwb\>r}>T wZy}߃YK 揌F~|טDFgDF:U+@皒:F)b-^BamlL6*#"Dl `> IjlvځXCrDvLA~ 75IRT?iE܆H5&OdQVC*\%?/]sųR[ǢE8-m2vyb{RHMd&p7n; ޛX\=)@Է>|T}Ȋa=LekP$\D't+Ŷ\CU看(t08/exJ^ƶ?o{?[s_N+P= 1W܃#h|d0VY;LR֯$.܂HX,5:;cQ0z'su#[uL>&ToNTگZd"k" 'RjušvLAfĮL}jQ##P7p\ lkض.N:9,_Quιnr0WO/NR̀ι&v TR%Mi3%DA$dx́ cE"K T&RŹsuC)TVe0ݜ\Xy/&RY񷭽UmD6抉9SQ?Cd>,a[6z[I 5فIƨޟNP."Y*w8jXTɹ+9CD: IWA?&`ӨlQ ޮM۪ꦕ(TVbvH9cLAZIH`NRT7 tr $sMU rX?MGͿ3 qB@{(e2$@ӿ#_ lR,Op\1Eu4"CdLA~< CX/jhۅsw 2K:A~%#܏D4q.-:\CyoS8`+`rDVL!"׷. uyR\S4ι%"`7ymU}lQ.~3}3)̹&/E&g)D:00է+Gs"b]+I*1 *[ǹZ+ι#"Ӌȟma$ކgXO[e?AX;{ ThiU5C.ѩ2Y:xޔTuh\ԾxS/Dor8WK]s-CSŹs GD\ԛ*j)n1[ed[aߝ[ƼVd 0)|h"𷴈m?냻p0 /z6EƶʺZ!"ՕB4qVU\>L 쮪_'Քe/fb6RuMfVDho'S udI,"c|hT۲,06aŠ;zc ([~ X%2 ʹ+ι "KP*pe-&^ ,%VU~`ط,S_a SlT~)Pf.G$\vۏ$OR扪s*/UT#~\3٢-ʠăZ*-< LlYɾgm NO#r+":F> 0!2:#p"n=s+sU\s vs`jFB)J9=\ 'OfdվbmM lR,M-SQ3y K9იj+lDh+DDdi>E9E.% s5U\]ٱs'aUUx`}1_x1[&X kooMX w`wQZkG˦LMTw~.Vc/BdiFz9pa0&in"ñmQn8Ws/[-QT٢l\,3}O)Y&;Na(5;"kgRXVD>z#2]E)D|H* \,9|E9WWDdiπ)I(>M\KMQ٢Q뙁Sd r47z_I*cl sŌqۈH<_@|DM!f?U$չU\][88:fy`m-T٢m5frRNI6&SyV2xA =y \=k#p*V|)@:gdCρᨎN'& 27z:GdRD OTs5OD~RY/fƢd(p%V!86G)'Uhi-v]&)԰2Ɂ8`r^ÖAKd7֎jnzst3Ͱsѕm:A58˰.KwVfvMou4IW$XQz`qlQg{> ޺2\Eu qI*DT/VIWTZ`Ese%ՁS=Iug,UYm4SB +S$ăZ bh`"W h%InCY:Wݴ;-7WwMou9zVUK>-N٨9b*dJVK,}Df;W[T@;V(i7|VTY8ظ+&@n"`+Q7m+YiMun*s.U"2DD.Du 'Sbw^eDJXyfWUR Ē0IؠnTP5v=4=p"#2[5O2ZX!75ERXow4D9YW UDjm=:`(vcjT2YKn6+9J3o#,Dh>Db;]\:Չi\ιTY0ސ|T)[e")J9Ak-m2h^n22Y 9;o9q +檏Ӏ5X9#/" `g^)E\U\Dh_T'oyڷJ9}X(`ZFfrt( oB{~  d p7I *K\5?vȮHܨ~lAeAM[]S~ R,5_Qu%FDf.UUJ6-Zb1{rn16[1`{ 2LRG2{0p,0g7W~՟D6Vӣ (`uTM'g;šgSx 蜫y"Ib6$r<3VhiD_wћuSe r(pI7e oTiT~/uY'(Rz'pB0,peh%2Œ^~;zU\Mg.21JUOTr:ӽσـE![VF{ŊDF,,SDdO9U Q 1W<} K4z&;NN>w$U E޹_\{~vs> KRH:.lQǶzػd[-m1gUmoոdd "y`r^oI>:`Oz4Xr3p#;q`Ȩۣzs:Af^)E\CD9/z^|~ީfdrpP@ӰM]hiAXQ#۶6w)@``;`r^v%.$ŶoK8U+:7~ XIUV\ )tb)'ka}m/LS;"JbԢI*ti:%%43vnmD<ZT/Ŏt83hjtOp6O;8ܛv||?kfa~6ˆ$\_n솉}^͘7}vDgomX~;_Eowg|~i}Ď|L쨱`gTNd(c8}ȠuyعC9t3)Ts$f:go;Hŧ@UҸqc&|E2矙//}NJٸq#u֥yܣl[k㏓5Iҵ+>v̟ƞ5-Gwbͬ߶٫f3uT}ښ>ԬZ˻^)¡-JFrwOx7xwXpsiPA^O_+]Yt|yiKtmޕ(6qyG:]v'13Gڹ~/eWf{ on *yy0w.|-'G3y իx  __|'|2<$rssy;z47o.Q'V-N:T^=u ++}^ˇϗZfzڵoS>[tQ/̈Q{}_|饌xٰfV[sxqfƨQ|Ǽ0r$u5< ?~OkCm/~OyyyIi' 6,2xn /ڞZqe?]FrTͨKy+qbVͬJZJQ[tKFkc0~Nz$&>1]tq0c ~)սI[C8Ħ*HRV,_?w*&5j֤W>1G~ţ.sau1ڵ\xӮ5yJjqwsPntܹ@\?zlZ9]5ZӮa;t{Px댷8ccGVf>_~|ceV s~g2PVs#2M&/ k6)(Ko+?2zARt8[vnZe^x,ecώڏyeLrк^kNhw9urըRg:h㢸&WØ1sի^{Anm`օ>pAX7cܴ-Sa@xnHӧ{v5jGE(:]sZhƍ L:?G}4NW[Rvx}H{s]:(=\/Ϝs"^oװ/}St_~H>[YLK}7}v˺_k]5Q6Gxڛzei5<; #Y"@O{c)ny7#^2a7[\Zq\^f8ڸ-Y>^x/ֺ5\qvVٳ]}]ХKRF""ۯzg.ᇄӏ?27<炧Jzԫ_Ⴄ֯~e[JTu.97~m1s|}C& [e'~/)ЬV eo[6n6^O'+3+|%7߄-?'6 IDATovR۶u#"0Rn6m9Zo{YbF,U^ GϞɉPZR!s@5kmb1{_0[>%o`5Ed77Վ׬Z-7 ;vZRҝwIȞ{/" Rn ٛlEzϟ7[̟;D9TmJrޞkײsFawVC,Y{i{ 7ٿx[6~z}:s.P;vBI?YsE)ꍤgd@K&&"@UE*U|sK÷Nn<dzgLJ_fzI9^xqj:"-޸sE*}\;sMyY[vvV혧 K ԩV'5reIꥦ/C:v9))l~i'>MժU~qۚaCi׎3ztR!׺^k|טf 9ҬJՇ<^*Um9ʊ-3ihe%zWݸƍdI"U)Su=|U6n[nU+WF(˗{Điw֤iSi|g>cI &Z3{HqYM;{FvLJf/ T{ٻmJ4٘浼S:}Vر9=+uw4B>HHJ4i҄[B# 5ENoO$ ?/ju$6o\^IiW]v%Q&-rrhݦMVk׮4ӠFW t~Z;K};yɉkOP);uxᆅ_[<(S&+m*x޽⋡vN-wD*R*׏yM72x'ˋZfvx۶m 1\qv}lx)1+((` ۶6(imWD#Ğ[qpʾO&:9ך=6x;]FPߤ̄VaBڰ{,5}KE`CaD@UJ%77oNQM5cV֨YF򟧞M6yڵDYG}@fF4!'' ࢋؿS'oI-y_0زk]cW3yG{7 BII۟O쮂]h{Xylٹ[Pti2N%ǵ*ŰOa}hTz%װ7kwiȻqKJFe?;w؎vVӇ;6سI[*Xn.| 2w̄q#,['ëB^0a `32a nDCΝ.UV :˷9o+:>{wұڭ>}ܦ5o\4M6%#hDu۶md+yޯ+1Glrbo欞9]5 2 s#JVIօLO4bhvm qve{?ݘU)7yy<䓞s;͔)LGLߍEɾ7ud~u΄q?>Ջ=֠Qգ~GqzFHB&u^.*hԤ9ڏE,?weݖJFf0%_5+`.>KH g{;6Uֺ)ewa^$a T\}]Ϩ:} <سUM&M[1.ڷѣ=kE7_ϰ5kkfgsPVLZ'yܗ}Ƣ cwei IV?Yt]5YM6 ?&>ĨmK=q}dA^y)6U-oJQB ٣/8?;^5嗲[jɩJ(0RSOy]YytiCnvgZiðo{`ݱcC bQk.ϹcO?O߾dO3V-><>O0֭=QYުW% >(^|yϨG:$>wM3zUh4bM;6mtۣ[]:o}}3vۣG92F__0i˧Ӓ,= cDob=pg23aXð1cӷH YDb"RƼW DNK7] |LK.:\}ŢUp\4p ˖.eŬ_ Ӳe6m⊋/f1w堃hs-rrxWg%,\,gG6>d/X}KFx&8~&}%&n77l`/s5nP.2g[9ԨRz?&#bۿ>MݞիTg8f1X1#^r;1Ϲ'=>Pc7=tPt_/[[vnz3:--q_/dy(ns##f(ǖ-nfIT .s'}89OAW_|'WZ;; ?ϻEff&۴ȣO߾aԍ6pX*эx9ں#G쮯W|kEoxGyO`Xz}^:"nUOg/ә+grma ܫe/\9"'hY&xs_j*jQz^eG >ۀ݂˻^SC${^N| ~;4 Tff4o p睰re|]?< ?ٺ}WUDUI1GsՠAESr7O:{ϷE ջ7_tF͚o۶'/3yҤR#O+9SS]bo* /I GU~E:Fq%>.ZYJuns#>?;oj}fT>730&>٫f|reVEPz(H}KNy[҄a 0_<u|~ss˺mh߰=wfаykqŇWt6ē5z붕MF;7ٖW{ZJŷnu| dgsU)wkyv[ڮ]uc}j>۷é?_WD(PZ}ZƟ-3T^^p'3ud8{wZаQ#֭͛Yv-+/gw߱=tRXt)twr ݗߟ R^=ٴa'ޓ4*~3')͛N4yfRyyҖ$]$øw&?-#֙j=Gd#gYY|s3cW yy5k݄;7˾ZsB84nB&*ŚkX)Koa!N:9=ɩC Qm`ݶuY=i˧WuoѝU-LY2]nSN4f^&/kl^9K6- 'tZy00{l6nU29.H?͛CϞв%4hචٰMGliW?OMO[^0޲݂FTED*٫ٺ5[D/c<]el2^u^ropr(rVdenIh"Gr.)RYo5#")""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZ1T2S4NHjhDUDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+UR)k#\;""#\T}I/ TE$~~7wk_5.PgQ.Gkʳ#g5>׳p kyD,ώ"B+|ȁ(uc NB{t3{qT,sۂǀ!>׳(HLTrpѮR׈׫˧;"""eDɔDD7B{C$R#+HOH<,TwD$Ŀ|-1Mԁi QQDupr@UDvSmÜG*YK$7;y[>)' TE9 ̍PかǿS"H}C},p(`WL\Fv8:>998OfX6p_Kk\ߏ"/@_~}྾޸=e;q߲6 xzm`ޯ/K`ֵ){8hG? +}^M_q,xPO9.WpS'2mG65{=!J{h{7s#""b+E/zUy_[OQQ.rho*Bk-2B,F>z3u`Z Di7>ϰpz~,7N0%i@uΏ®8x^}"(}caj}Xmz b~dE1O[,p`2GwO{e&Y'qW^% ->W/vכ}]ԽLID*\_b7NFYz#JblF ԏ[ɯ@g( gJyҺb'x}{/6X'x{\ 5 8zq#E)QN}"3'pfIH"H%%A>e~o\4een8xEsp.IML"ow'Xwnb}}ea%nD7 3ڧL.1>[" ""qI"BɔL4†@{ZƯnu [sFt5럠O>G{pucc:"oO ##c!˧e%S*| R]8X_~ɔ"[S{iu˷pO/"KTcU [Bds~ɔg:;- S-P&,wO"Q2%R2J҈즺㶦 %aW.Pg.ᷰ)V( IDATs$o#Lg <7IUѪS "\;$d vn+o-k* F州6`2pkiO3ymWBUōGXvGtw"<ͼX)Ph s*"oAnMDNTtI*K~|s-\؟_:8J_o8J۔ _ Ե >>;[6:Gl᾿|zznzy8pH"%"R 5%Sn$.ܨhFy˛u} ~)on4;RP%jik[OY(}P+nRi1nv{pY)*Pm"geM~A?2ksB TED S@"| Eq|"O=78r=>֔X@ᴊr=7z8؃hPxNU~R"+"RhꯈTb{ ̵ 6m}&Gnpҟ60U[\ Nש""KkTEDJn^H|D v,Y[Fp};{VM#f .[re8nQ2GN7RgÏ=qyDbwGCp!˷#S5XH /~ .qM<'ҩ{(b R#Mq()ҍ>uW^]m73`&=DhꯈH{n\PR)V_vN%w?CQK uܔh{;7}WDDRAHnU'ѽS `TM|l E2Qp#pQ][z= vt.Ymo9Rp">>s|uRAB=Jܕ09;p,lECKpCgKQٸQ )q6>fqɣz㦮EߧF{p)&$\pνuF_g[0IJentMC?=n8ւmp{'RC[$`*gp#S׫mysY^a""XLtb }Tœ7%%mj#I|\+}ObeRTwDdwNHjhDUD$)>&| .cG&CHYURۆێ%}FTED0nmN`5>,B[˫S"""" ""qC#ʸ/""""M)mq۲H$][\rƩH@UD$)N|OLEDDD$Z*"_=UI""qk4WDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+ TEDDDDD$(P@UDDDDDDҊUI+UR݁Jn,8՝IC RIcMuDDDDDDDhꯈ""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""VHZQ*""""""iE""""""V""{0tskTGsg/k۩쏈TZ>H%gi T:Z+6BN/n/]$VƘONfHrcN6OeDDrЈ1@㽀EV6rH0dN?""DkTEB3<@ ^k5VSFD$XhHFTEB2dFM4ZP $""""I@UD*cLܠUW"KeGDDP*"1:6pbAgXk?LMDDDD$4Hy=՝Hcwwtz p۬t%0ih[kX pn/˺Z`N(54:̀Z`vn2//N@ s vOleY3x_֕w$ޣ%m]{HS*"i ?gch/[ m2ƌC{ :uUcL&n~!՞އ? 3 -dcL@.& 1f0xZ9{$1^ {p&..L'?DƘ߁g}Ƙ{Ntwg*aK'19kmwoZkη=< Rm1M1TౠSZkc~T3| j=:Nd}=Z@+N *py駬݁UIk_&{^ m9ζoT?u1W[k_RnPmcL Z^1tNH/F"|`Z{[  xhh.3Z;5{x'cH_#8.Ʋ{WcN)}HߎFZD T1mpAv>'S 3Ts6.NpsvR{d}kcz~^ZDS}?s6tKp}LG)nHߵ\n8 "RiUI[Ƙ}xE@v Rq\ x4:038ήgcXrpF0o5ƜKnj1_P2H]|EZr)0sD,(G=qԿ_ Zy$29Ƙ:b{R(}w=:E)rƘkyuƘ&qަ0H-~13{$S>3 >kв""5"1]pC<\ΠqSsG1GXkkYkXkONCƘC0tZpߡ@km[kmKܨ+>u0hhͱ6' h`ZkD`m܈TO} mƸ P ucLS{pS;@mkiǭnmmk=9YݜT'638p<<8pAi(:B޳ ZӇkko o.pW˅PZ{v@x/ZZ[/9'rka^eEDvk TE$ktn&pX n1FXk-hn8` T+ٹ8Z;58Yvmi-@wkc!}{gH c .h~9ZZKh@^ Z^ڇXk /Zk֮ sZkkmmk8`\ _@cQ> TkS#0-L vT5 "kMSuq?o; /Zk󭵣q3"Ū VLiA3qED$M)Pt2_F?ې19piЩU]~t18n?kZG``~Ssg}"~e4gx3Ƅ­O Z&rwx4j!>roqGDDʑUIwqmCA߉RklXשΰ}$z_ ~4MpY,V[ų^,(/;F@4 S&4PKfn[p,7];Mfqc6 ȯS,""!H:.qun{>~g7sf#c{["EEڢ+0EZ uPxꗡRƘƘƘ'1c&c7~**V8jpX=Q l;0%!Q!u}y,__@ϖǺ@~n $[4}TE.j4Q7^e I4O1GF:c.Ƙq%x=frO<:gƘ'Uɶ8JƘpko!u} G];<2:%g?e* >e@`vK@u, rYt=wQmHS*"Mf;.g־b jW\z6R^q6ޭYe-n҈)OBG .#PqƘ3(&Q֮387m)&sI-NHP*"iZZ`e$#_1[\إV enyBVI1k9}67 `-n)Ƙq31kI*7/_1pӂNN Z[!hB 9XDDRCwl$I@Z8Z |V7T_r2f }[kIIOR>Aɍ#Obp ?}ֺp${#nkz~y Sϐ22Scn%"TD<1־Y VQ<)X l{pu1U!+x Tqnnpz@5xd@`SՒBhBp3Dz^uGDDQ_ⲯr 76Ch`_N7*Nx,W@q;t0@T'RRpS/Nhv`qb~~&ԢKBm#lRDD*"RaXkGQ2XL0ԉP-x+ ଲ]֮Y1sQ1eV6t:$ps/cL{hXkfֳKouz)@àiʭ 9nKƘ1%}ٖGDd@UD*@z1`nd5\bCqR; ~21)!DZz=Lb\-eZ;?zgֵFjSj1g Yks~f1ԈH M8l)JHT8ڑVKZkNFF>bf\UA]{ V1$Wg>c|cś7+:7~g?@5cML0Χʔ7.( Vԟ/S*"R Xoz0>Lz9ބBcŇ11"ç,fMV.$z`1_]cLkcc2f:/cJ'kiyx2pj[ۅwl$u^_c+`x>uZ-^9~sucnFwsc[CZc cLcL< DDv;+"@6)'ndvKc/́׌1p#nˀ@S \恞v1*i9bYBVnO\Ä́fHfPEXco@6n Mݍgwav$@H! Q>7PDVY}}",{d?Sԭ۷{;fn-VWԩOEU1w]??Ow<xsH-P+U!d NocK֤fn?,#/"?~N pjqgAUq4AY jEw FEu퓨Ã>/41diT?"}jם-7I``.v>&5*[00qpPɶd[=G˛]}Jq v_? <ܡ]v]%VƖmste;4WTq34RTq&H888`888V888:888moO88}XMR ؞8TUqOp8R\Quq/q;[tppqzWTqos,0lb[MGUS[-8ӳZqqq/qqqi+\Quqqq WTqqqUqqqpEqqqi+\Quqqq WTqqqUqqqjqYpA>QVTGD 3TV8N@T28qfꜞq "ӣUVTGDv Tul+qUqځKK+"wVջ{N,/!"Qֻ~*"$ϳTV88ר:  SDn[,V.s'o-qqZ+uUZ-88qvd`J06|"{qq>[TiG^W՗ެ'g&uv}Qqqf㊪8}U |x=KN3ns""K H>+"?QB/pߪ'S}T%e:.0OUW.eG#mEs/"Gc| XT&&U0*`m:@DsUlS \'tJ7oe6 [sn}s%"WbsjR,"a,`hȚOz#9`:eNNWy%d K *!<}`l|Z{܎|WU?7~Hy\HU_l$c+b|gR<'3Uj8+eL$"Y1)Ϊom+d"]Oz2fzWx',՝_ ΦߡCn"#UY )!X`?`_ UTvL2Fy,! `c RwAwR'?c` > \~G_cU%dqDd|75zv*JtjeuG{ظfl8N[ኪ8}J}vAA+n|Z^U_T՝؅uWϻMUxWUcnai/>LPTƊsZN jx~ǫ;iоUu_5ͣۨ [zn5l Z⃢gUiU}4~/"G@""󩽾&M6Us5_՜`eaGu ا0+3UDmkc3gݪ:';[UO֭|HpΏk/S+in^@K5qm\QuO!"+U&-'f8KU)1jʵ |lV*R7^ \3֩6$X<U1\j% ч?eDUUqኪ8~"BFa-R CMTuzyJy9e:PA`Uu6)#omse-u6Z&IلhTxp憞/ ܍0XțqSU)e<%"GjjGEd^ RV /Ly8siv{2!k>p, h\N_+Yيļ. qhr\u=U8c4 "xUŠq :QQMDdR4S#>j-3U2RQ#D]&#"Wl&} wͧc/"@T\`bSPտȵ'C`{ {텮n+akK%UU-}sg0+eHýj\lB%7n]vIC8k8N!*eI 89HUK3C=hYpmKDdKLԮʢ E t`JdwB7kN",?4`8=_9 l+M-IuU* |ហ J*tѠ˸9qOSCӯUqVኪ8}EDdVuS5o*ֵxNEa9F:Vsnwܵ78S0DUmޥ98Sugl?ݔŰ(ҟSzBcWu{jˤ=MVTU- );.񶫔K%&8tvp⊪8)!07.3"7GN7T&v׈e%=?=! ~X_|w|R|:YSU_A\&"[f Ķ:@maZrTNy,ƶ UգKmTf ؇a0:ޛq/㊪85[-CTVL |-x^t#يȈ`+C)%vJ lysPD7,IaًCʶ.9S3f = =-O!, cmb_s{[Ty2"-7[#YDU,Fۆ`SqOnf8]Ԯ:@DTذdI#VJԵoRp:[6{U?65, 䙫 M*i,z٪-.ׄރX,#5H!8gqEq>K6(kÜrQs/0Pn+o e'DdE:-ǭ^%0褩ޖ 0fn*nj/zcDd3̵:IUM-M'X6Hׁ?H3?]Sc/Vת8N3qEqϩ~ADV)wfV*\Mm/H"2,Oq]K֫ܭe}/waEdV 8L\QuOӀ?EY= x': ^eUm1szڡŁEvٹJZ2}Lƶ -","2QDZw-㑂}sEo*SC,%wWۈ;,+"UO*Yi64>e\QQ+"g~EdlYN;&Nj:y!"#EdI8}\QugA -"51C`uplPԸO=ZWbiAYےeupßDd>I@Ge޶:8x::$"_/RDdQ\L֝4c/`MD>QLlc#"q*y96Zaθ;n'"K7RUr(kil2-^_ݓQ롪/`Qn")h>"2EDa o_ ~3S؆a[T98STKsDsz1GY OiT`mH#1K%鴖4Q!5r=| رU}^DC'KvI`[4GO~x )VU""bh+T;i/|hesbk?HD賗=(׫xw9֨0f"nz%8΂:@S1/c抛WjL)F6^%Ps =|IU(flVFU_Qm5wEy>-`eU&w=4˸#?훺6<_{sŖp,'gc-U=Dʄ[&`˰mH^͏C Δǃ }T-k[j]?""U=s?΁x 8DϣsT0`]̫cfjc/u6s@-q" `%I}ffo""07ȥ0 ,MT9[(l{5,p2ؼ S+">֠sUKX-dU ,Y_cg˶`=s]s[ǻwY X;Ȳ4&V$ϓnֈȺtZꗀCw=̚$st*07T+1W XŀWs8΂+8m@wUqq j9888m+888N[ኪ888V888:888m+888N[188|?lNqq~888888:888m+888N[ኪ888V888:888m+888N[ኪ888V888Z-8` "GYV8)"+02WR leݬS[%8UqZDYϨ꜊m NWՙݓΩYWTv<pEX{΀+OqEqvR`x%๲Eϒv_4i."8H8RV!"%wUV8}Y%}3UV8ר:ӧ_YI ^ JVc^k8ӧyk8M-IDdf5jё884 WTsszZkrqq+)Ddp1d#c.EU5R988Uq "(p%U=o3888}\leOvQۻ":Q4AU-;* 0 WU)YwmڣTD^Sǚ~O#"+`yFLRg[*X 8` 6WTէZ*TADVâ?l"2X;6pޠZ=d4vyO3Fr!-[_t:>6'} Em5RuthL׀Tuzqt_ U>O_x8)q  i3Koc{Pu磴OEcL8-oR͐p,b9v,| W)ؾK]R3 < )nyذ<=ǣgD2KLӎI;ÒϏ o`Qa` QgWr"_>g(4ok(W:c78:B`-X%~ NU_5(8<~NDg6(06^ n./Kfd;?nEe߬8%2C1Խ ' ߄o]+)fΗ:}ƮoGlG;bC, 8|9z8XD~Ts=-Z_D U ub7 8.޺OFǗ%5fRP٫u_HQ)4sԺ.HLUJMBA=-X`>ȨE-Ivњ<~%EYo`ޝ#7DDʇ:w4%*(k(`/,2SB^U=PT2UuqVyzgY!TEjcF`N{JQVmՂ\>nӿpEqv$0KUjcdf\bW]zarRi\2E;~ \==ŧ]_vu"O`2{5YVF/^'R;Sſ?rۺ2Y^Wߗ,KޯyX ,nvĂ_L]-^~^~p=OjHn)qWTi`mGvvR[6#%ާ.O7)5BYH+IYMˎC3I*3H-'h)}8 WTI9rffR!DUT : 1euے0:Z"PUX̣|]$}hEFtg J|.*gYCKǾ@wVthV vuzǴ봻/!'"'`[_uuPͧ/K9NU_3 6~;Sy'uY pEqEU+ϥ?SV+ýpr< S%V2溕 jTez"M'~0YPb4TuBwǮRZK۫*_Vf̕ODN:[p)՛+oőEjl_"T;n\S?i=;*s۱y7OuQEdcl+ݰui%0#}EAgUqR.] Tfd#Ơ{39XUӈtT?l_\#+si&=]qͶ犪c769nإUoYjn;8ʚ 쨪hswCk; ꄈ,[WջJT/ 7"VxlK//"/߫8}8N/Ў,1zjvA SaޠCQ]\DSRdUo}kjuD٭JMwzcQeVl\m&"T5}StD.C*G2iy-n+Bmzw(;~V *ht|x%UDЍ`An>c("J7v>Evqbn^caIQj!*e~j]FJZ^te*jH#Ve9)*eC_qUC$p-_ܦ7n?jT."]\P 0j/|PsCDۄnAr}eAڂǸ8NCU%_YdYU{ ~#3BЌ҈:}\rt!A%eUDoT%QiQ)"Rwd4jX^](kF dSJ;kEDdM:+TGr? \T&EUc Uvݴ.w!"RWA Fv k-W("IOR/==^\ "iD }<9>FD,[9J:>CS}TT~QN)+ i\QuOUV'(RAɨ}E&,qWՙ{ZlA.pk89BDA1mjp{ `帍Z@B)1r_LDknԞ_^"\Njy7Z+Yz_Z6 j]s x`JԺDZ;(=M6v{ydȊAؚ:c6`>8>ZDSDd=_QS uxw杈Aky=K^PH_Dv\yҪz1(kpu$" Dk9Eȡy/DdZ-d GENR S\偂 r6Zqx0eeba9X{XP˪zVb? s'G_0+OW5"r&.lx -Fq:N;=<ߏX4?>LlVD¬E1ocؼX}lQU5ihݠLCD:0{p<USp]ǮBz9ȷ<8 lO톶aR@sKjV;NQUOZ!T7sK D]bb JƝbIs*cyoZEK_QWJ s8|PvI{hYJˌ)|eSʴ}-Ԝ[e1 HL-|;A> ,[rL` o%4wڼ7[+|Ǐ޺](:׊ҋKFj,hUQ} .. `wLw*ϰ'Kiop}v=~[D?9E sof\Ock lQt\7fO^_Q=TuvIqo>]k˄TUV9>8s߿?hL-q_uo#oa[ M$; 4af𤪖^;֛5^ÂΌk{:/l ܢ=AIzX撘 pV]?;s^*0Eu ;ny^,eNnP"7dy> Stչ1UKGIa疘;̂6ru%ʮY%bl[؋WU%!-c&X{7pEi6Uqjh:8΂BGP]"888~UqqqpEqqqi+\Quqqq WTqqqUqqq𨿎8w-:n8.>888N[ᮿ888N[ኪ888V888:888m+888N[ኪ888V888:888m+888N[188UӁzFAS!.UcOQ!Um+q3q~,٠l] qY/ʺBU_>XA3sêj#jvUݪ"2X8}S/|  רNqθEqv(`*D1FH)DdQ ~|PUlB[g'|J"lh4Um !"h^Jyi>Fqq"KqڂFi&k: mxLDmB{8k8NUqڑg1Wɘ0EDDd7U7s""Q%M>HY"2@UaxwZ)kt$ӆ[DdaUj9\ULmg YIUgN`cLPuRջLYROWI"rMyU]2,qbϬ$"{Z<:[UL>^ طrpbTŮ6sUu~TPl88o&Fĩ\d8 4ZzЬq㮿NEUțu"N݄gڇu6 "#LK׫ PD> l, ;ݓv z*E}Xd,ٮ`6h7Kb۹_~&3Iެ_)Gj=|d_DAoJu[bԲ2uK}Y6JQo\^ό>;AEe\PnP5{LyJ康0ehNN˜O5z?}`%2% aOM~,^ͣB5\)y䩯%wu;W.i NS(eac9 k6"rqnìX8QD. օ$X hP|i4*֠,UqbIqVĔޢG~py`ps2"rp#Ĭ00[YUۋ ٟk#"n& ND.BCK&"Wزu"RڋED‚lP`L+SEoEE1%)(o%&ņb//98:p#/nY$cTIT$}xQ9f,`7 se] {8γU8!~ {p`Fr܀ə10Wu0vfplu\E@:ĺN߱_c֑a{\# wADdIqyQ.fr=1ED2ɞ ܋ױb:\8NDDU 6)`4cWl+QS0B` 1e)}od]5f߀"ݻb/5YBW~Kp H5܃Y':`2KU"D3>w*> x`TKt4ڈwgC=׈0O>]`K&b㔽$ !" ]{bc4'b/2vÂBڤSo%Z0v+ӛ%\^РIwo6=Z/ڌrl =:-wŬdS~a<.L[{ب<ðηD7侉|u]raY?%nkP~l`Vep:\`ӂSuz%sWO7 *9;&yt/&@pq:A?]u݋ ˩Dr礍'm70)@ѹ֞OO:e7^Nm0FG'?)?8;0YsݓvH-NM[fMey?zTVTU$4nQxNRT@ nJ/K(fFK7i5SQ,je++s<`lJtsATQÛJ>ԙLbPQ HTswETgu.΋T;5~kθՠ*YJɤ({{-){ %nK/) t- ^{?:\.͚U2("TZNDFbA62Zۄ(_]'{r1rN {ؓȢ_S֧\||/ş,'mTgEYT\3w>ݠs1nj0PH];"e]/7ADdƛ1rwQ}# 墈@84lxV\4ԮVU/.&ٹcT9K'ǛUqWT~T ؃`x_"HׯFDJ3C$̂#QU׬ <-rD횪?ۍV7}0~DU/v]9u+Vٖ 43; s{[Kջ/h3U']ӚwNYʐlt=6Dmۍez.6HQT~[" (BUwj[em""sլJ;ڵHQPUn\GOTFls!)!j-2 ֓f)IU?XeV{IDATQ-}jy͋R"9BU[FH#dYb=Ifm(tUrbloQB휻h4溘wN򸬂2̵jC$&?j},erB%m>ZJ7VS;UC7=ޒ}̣60ܨ;N:NDx ܂= e[l|PD>~@^~lF־6lsڮR7ǞFȈzsx)qY$VT9;֜"r ~FU}we4)oH.RCE#JU/"ƋȰ.(it/"Ǫܜ;`2NoZq;^W(Pq^ }eUM;kR\/x=R( SyVm鳸89"/,a%NVqSo rCbdg cٽs"Oה[]!un)ݐ` O#1ט`A-g2q ˘%JLkF$&1_WuUbYVOfDMA!uRNn]RTٚe1{aCu%G&,UDz<_主Ky5%B wz{{koB=% WT|8dڳ.ղVpV=,=fWUeH]8>*/bJQ;tu j9ɛyTi]h# 58DQ箈^JZlF:o[%Mk최7\QuTup܊~OTu| "GJj]؞s]᜼EE?f0Bz x?#S:?"YS): h  bEn|[V)VKa/_v[X"M7E/"5"K(5J*t~˖U{;{PzM$eq:8%PKE~ 0smsޱ<ɡ7<ӂCUg>XP:e>S\Umf4P5cED*[qy]Uuǿ$> $B@? HQT"CU)`WDF:$i-3jѫɨv-/"7 9PU!O3g Z0RL͂`TA=doѨ ",/i4s@?_+3amF;euۨ[5Ѹg,8l!j7 _f] 03iZWj圌)*tqP>IdޡPLUQ3>[ޑ}}W{UI5+?{ƳxSIyVl}o4-PTMM3;83 NO叔lK-$uzET46g]#=ybz]!]bD$W,)SJ29.fZvLYn+욜L߻d]1Μ.3k|l gafN@v3 [ n5T5*mUnɾm. WI;Jn(*SMsG{(^MJI\r).w4?d/K;,<]%m\>()?2$4:@^ be۬M\}ʔfE79v$WӸ1v)'/4N+ϝ&EY/&OL(uѸgEE~?`H4l!pMai <`}oyo|Ni tu>BQ !`f3p%)PJlVU\~f+!ݒh"GpaxjmܬL/ u>VOeܔO4Q)S~`IV5Kf-Jrr9EJDBliiXnkHڠj_&KfGzIZWJ,3M}|AW Bg*IMSuIpAdg~SڹG$m+騲cI1$}*fvξTA0FM:G$O'`cf6"7xtܢ ǷYهpE~o~5ՙDc> L)>`yV`:{ܗn[{"mʟ? $:ϒB+Zr cKxʨ5< ܗ_٢gc`|V~UY*;IkZo+;]RB8Gwh ʭ]xOo]`b6[ }iypSAޭ@7~JݐIYhRnn6Z6]dXRњ{OG-\\VRQMl?<+3ըX 1+7C7}d geuE5st>^\Rf %~OZm]ؿ6!)6~G6Ohr} WOd.ɂ|Iq󱨤>J~VkZzGeoJ;3އ[޼Zw}LQM휒,_/~CNmk۹xwgJ61m()-M}Aod".km\مz60 F,ffxC%%3)<='WKmZZRT5f6_ (˳\!]#;VBoAc١?kfˀ,8R4x _e,3nN*9zj21?Gq+b7qce竭jּW /{7|efvEŶ}3z!anּ3ʟa#3~  ->/-IZtj㚒+V*o+KZ |ܼ8< >A&G5јً>~$m|z7IWXVfH~7ҍhҘmE̖J:F?Am^I` Ǩ y_1 `I>S,Mh6jI$# \|3 @C&TTV}(!R܄v\, & l݀+y:'4oO<>=(/񉊝TQ)x;"k1j`FBUde$(|'F\?_3O C;rݛ,J̰_Dᑎ[7_+9klxw B܄+В& (Y4:ݐ3>Hќ7ps私g;1A"1]\,4AcXQ Ff0,K5$H_ 8fϗYWy'pZ9f*le% \V:p0AtPTQ]GBQ$՛/IIZfٝx.'AA$}ZbI//WИ+fHAtPTʗDt $]$O;,_y@-"D<}AМxUK] 6}<=M?MN4 e;]G9` Na% 2&HO ䷺8f P+Յ< ْAAPT18}Z(wMчu<p %}TҐI-=  `i1G5.1U*ԩ)k4) 90Ve#+ۛYnAA=cISyҹ=SDt`B⋁S`26*2AAF(ENfv O>AN2s;^AA)BQ 4 image/svg+xml PUBsfasdfasdf IPython Kernel ROUTER ROUTER PUB - Kernel raw_input - Requests to kernel - Kernel output broadcast - Request/Reply direction Front-end Kernel Proxy DEAL SUB DEAL Front-end Kernel Proxy SUB DEAL Front-end Kernel Proxy SUB DEAL Q W E R T Y U I O P A S D F G H J K L Z X C V B N M 1 2 3 4 5 6 7 8 9 0 ENTER Q W E R T Y U I O P A S D F G H J K L Z X C V B N M 1 2 3 4 5 6 7 8 9 0 ENTER Q W E R T Y U I O P A S D F G H J K L Z X C V B N M 1 2 3 4 5 6 7 8 9 0 ENTER jupyter_client-8.6.2/docs/index.rst000066400000000000000000000015621462351563100173720ustar00rootroot00000000000000Jupyter Client |version| ======================== This package provides the Python API for starting, managing and communicating with Jupyter kernels. .. important:: This document contains the authoritative description of the Jupyter messaging protocol. All developers are strongly encouraged to keep it updated as the implementation evolves, so that we have a single common reference for all protocol details. .. toctree:: :maxdepth: 2 :caption: User Documentation messaging .. toctree:: :maxdepth: 2 :caption: Developer documentation kernels wrapperkernels provisioning pending-kernels .. toctree:: :maxdepth: 2 :caption: API API Docs .. toctree:: :maxdepth: 2 :caption: Changes changelog migration Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` jupyter_client-8.6.2/docs/kernels.rst000066400000000000000000000222141462351563100177230ustar00rootroot00000000000000.. _kernels: ========================== Making kernels for Jupyter ========================== A 'kernel' is a program that runs and introspects the user's code. IPython includes a kernel for Python code, and people have written kernels for `several other languages `_. At kernel startup, Jupyter passes the kernel a connection file. This specifies how to set up communications with the frontend. There are three options for writing a kernel: 1. You can reuse the IPython kernel machinery to handle the communications, and just describe how to execute your code. This is much simpler if the target language can be driven from Python. See :doc:`wrapperkernels` for details. 2. You can implement the kernel machinery in your target language. This is more work initially, but the people using your kernel might be more likely to contribute to it if it's in the language they know. 3. You can use the `xeus `_ library that is a C++ implementation of the Jupyter kernel protocol. Kernel authors only need to implement the language-specific logic in their implementation (execute code, auto-completion...). This is the simplest solution if your target language can be driven from C or C++: e.g. if it has a C-API like most scripting languages. Check out the `xeus documentation `_ for more details. Examples of kernels based on xeus include: - `xeus-cling `_ - `xeus-python `_ - `JuniperKernel `_ Connection files ================ Your kernel will be given the path to a connection file when it starts (see :ref:`kernelspecs` for how to specify the command line arguments for your kernel). This file, which is accessible only to the current user, will contain a JSON dictionary looking something like this:: { "control_port": 50160, "shell_port": 57503, "transport": "tcp", "signature_scheme": "hmac-sha256", "stdin_port": 52597, "hb_port": 42540, "ip": "127.0.0.1", "iopub_port": 40885, "key": "a0436f6c-1916-498b-8eb9-e81ab9368e84" } The ``transport``, ``ip`` and five ``_port`` fields specify five ports which the kernel should bind to using `ZeroMQ `_. For instance, the address of the shell socket in the example above would be:: tcp://127.0.0.1:57503 New ports are chosen at random for each kernel started. ``signature_scheme`` and ``key`` are used to cryptographically sign messages, so that other users on the system can't send code to run in this kernel. See :ref:`wire_protocol` for the details of how this signature is calculated. Handling messages ================= After reading the connection file and binding to the necessary sockets, the kernel should go into an event loop, listening on the hb (heartbeat), control and shell sockets. :ref:`Heartbeat ` messages should be echoed back immediately on the same socket - the frontend uses this to check that the kernel is still alive. Messages on the control and shell sockets should be parsed, and their signature validated. See :ref:`wire_protocol` for how to do this. The kernel will send messages on the iopub socket to display output, and on the stdin socket to prompt the user for textual input. .. seealso:: :doc:`messaging` Details of the different sockets and the messages that come over them `Creating Language Kernels for IPython `_ A blog post by the author of `IHaskell `_, a Haskell kernel `simple_kernel `_ A simple example implementation of the kernel machinery in Python .. _kernelspecs: Kernel specs ============ A kernel identifies itself to IPython by creating a directory, the name of which is used as an identifier for the kernel. These may be created in a number of locations: +--------+--------------------------------------------+-----------------------------------+ | | Unix | Windows | +========+============================================+===================================+ | System | ``/usr/share/jupyter/kernels`` | ``%PROGRAMDATA%\jupyter\kernels`` | | | | | | | ``/usr/local/share/jupyter/kernels`` | | +--------+--------------------------------------------+-----------------------------------+ | Env | ``{sys.prefix}/share/jupyter/kernels`` | +--------+--------------------------------------------+-----------------------------------+ | User | ``~/.local/share/jupyter/kernels`` (Linux) | ``%APPDATA%\jupyter\kernels`` | | | | | | | ``~/Library/Jupyter/kernels`` (Mac) | | +--------+--------------------------------------------+-----------------------------------+ The user location takes priority over the system locations, and the case of the names is ignored, so selecting kernels works the same way whether or not the filesystem is case sensitive. Since kernelspecs show up in URLs and other places, a kernelspec is required to have a simple name, only containing ASCII letters, ASCII numbers, and the simple separators: ``-`` hyphen, ``.`` period, ``_`` underscore. Other locations may also be searched if the :envvar:`JUPYTER_PATH` environment variable is set. Inside the kernel directory, three types of files are presently used: ``kernel.json``, ``kernel.js``, and logo image files. Currently, no other files are used, but this may change in the future. Inside the directory, the most important file is *kernel.json*. This should be a JSON serialised dictionary containing the following keys and values: - **argv**: A list of command line arguments used to start the kernel. The text ``{connection_file}`` in any argument will be replaced with the path to the connection file. - **display_name**: The kernel's name as it should be displayed in the UI. Unlike the kernel name used in the API, this can contain arbitrary unicode characters. - **language**: The name of the language of the kernel. When loading notebooks, if no matching kernelspec key (may differ across machines) is found, a kernel with a matching ``language`` will be used. This allows a notebook written on any Python or Julia kernel to be properly associated with the user's Python or Julia kernel, even if they aren't listed under the same name as the author's. - **interrupt_mode** (optional): May be either ``signal`` or ``message`` and specifies how a client is supposed to interrupt cell execution on this kernel, either by sending an interrupt ``signal`` via the operating system's signalling facilities (e.g. ``SIGINT`` on POSIX systems), or by sending an ``interrupt_request`` message on the control channel (see :ref:`msging_interrupt`). If this is not specified the client will default to ``signal`` mode. - **env** (optional): A dictionary of environment variables to set for the kernel. These will be added to the current environment variables before the kernel is started. Existing environment variables can be referenced using ``${}`` and will be substituted with the corresponding value. Administrators should note that use of ``${}`` can expose sensitive variables and should use only in controlled circumstances. - **metadata** (optional): A dictionary of additional attributes about this kernel; used by clients to aid in kernel selection. Metadata added here should be namespaced for the tool reading and writing that metadata. For example, the kernel.json file for IPython looks like this:: { "argv": ["python3", "-m", "IPython.kernel", "-f", "{connection_file}"], "display_name": "Python 3", "language": "python" } To see the available kernel specs, run:: jupyter kernelspec list To start the terminal console or the Qt console with a specific kernel:: jupyter console --kernel bash jupyter qtconsole --kernel bash The notebook offers you the available kernels in a dropdown menu from the 'New' button. .. _packaging-kernels: Packaging ========= To release your kernel as a Python package, we recommend following the pattern used in the `echo_kernel`_, which uses the `hatch`_ build backend and a build file that creates the kernel directory with the ``kernel.json`` and kernel icons, which is included as ``shared-data``, ending up in the ``share/jupyter/kernels/`` folder in the user's installed environment. See `pyproject.toml`_ and `hatch_build.py`_ for more details. .. _hatch: https://hatch.pypa.io/latest/ .. _pyproject.toml: https://github.com/jupyter/echo_kernel/blob/main/pyproject.toml .. _hatch_build.py: https://github.com/jupyter/echo_kernel/blob/main/hatch_build.py .. _echo_kernel: https://github.com/jupyter/echo_kernel jupyter_client-8.6.2/docs/make.bat000066400000000000000000000161341462351563100171370ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 2> nul if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\jupyter_client.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\jupyter_client.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end jupyter_client-8.6.2/docs/messaging.rst000066400000000000000000001763171462351563100202530ustar00rootroot00000000000000.. _messaging: ====================== Messaging in Jupyter ====================== This document explains the basic communications design and messaging specification for how Jupyter frontends and kernels communicate. The ZeroMQ_ library provides the low-level transport layer over which these messages are sent. .. important:: This document contains the authoritative description of the IPython messaging protocol. All developers are strongly encouraged to keep it updated as the implementation evolves, so that we have a single common reference for all protocol details. Versioning ========== The Jupyter message specification is versioned independently of the packages that use it. The current version of the specification is 5.4. .. note:: *New in* and *Changed in* messages in this document refer to versions of the **Jupyter message specification**, not versions of :mod:`jupyter_client`. Introduction ============ The basic design is explained in the following diagram: .. image:: figs/frontend-kernel.png :width: 450px :alt: IPython kernel/frontend messaging architecture. :align: center :target: ./figs/frontend-kernel.png A single kernel can be simultaneously connected to one or more frontends. The kernel has dedicated sockets for the following functions: 1. **Shell**: this single ROUTER socket allows multiple incoming connections from frontends, and this is the socket where requests for code execution, object information, prompts, etc. are made to the kernel by any frontend. The communication on this socket is a sequence of request/reply actions from each frontend and the kernel. 2. **IOPub**: this socket is the 'broadcast channel' where the kernel publishes all side effects (stdout, stderr, debugging events etc.) as well as the requests coming from any client over the shell socket and its own requests on the stdin socket. There are a number of actions in Python which generate side effects: :func:`print` writes to ``sys.stdout``, errors generate tracebacks, etc. Additionally, in a multi-client scenario, we want all frontends to be able to know what each other has sent to the kernel (this can be useful in collaborative scenarios, for example). This socket allows both side effects and the information about communications taking place with one client over the shell channel to be made available to all clients in a uniform manner. 3. **stdin**: this ROUTER socket is connected to all frontends, and it allows the kernel to request input from the active frontend when :func:`raw_input` is called. The frontend that executed the code has a DEALER socket that acts as a 'virtual keyboard' for the kernel while this communication is happening (illustrated in the figure by the black outline around the central keyboard). In practice, frontends may display such kernel requests using a special input widget or otherwise indicating that the user is to type input for the kernel instead of normal commands in the frontend. All messages are tagged with enough information (details below) for clients to know which messages come from their own interaction with the kernel and which ones are from other clients, so they can display each type appropriately. 4. **Control**: This channel is identical to Shell, but operates on a separate socket to avoid queueing behind execution requests. The control channel is used for shutdown and restart messages, as well as for debugging messages. For a smoother user experience, we recommend running the control channel in a separate thread from the shell channel, so that e.g. shutdown or debug messages can be processed immediately without waiting for a long-running shell message to be finished processing (such as an expensive execute request). 5. **Heartbeat**: This socket allows for simple bytestring messages to be sent between the frontend and the kernel to ensure that they are still connected. The actual format of the messages allowed on each of these channels is specified below. Messages are dicts of dicts with string keys and values that are reasonably representable in JSON. General Message Format ====================== A message is composed of five dictionaries. Message Header -------------- The message ``header`` contains information about the message, such as unique identifiers for the originating session and the actual message id, the type of message, the version of the Jupyter protocol, and the date the message was created. In addition, there is a username field, e.g. for the process that generated the message, if applicable. This can be useful in collaborative settings where multiple users may be interacting with the same kernel simultaneously, so that frontends can label the various messages in a meaningful way. .. sourcecode:: python { "msg_id": str, # typically UUID, must be unique per message "session": str, # typically UUID, should be unique per session "username": str, # ISO 8601 timestamp for when the message is created "date": str, # All recognized message type strings are listed below. "msg_type": str, # the message protocol version "version": "5.0", } .. note:: The ``session`` id in a message header identifies a unique entity with state, such as a kernel process or client process. A client session id, in message headers from a client, should be unique among all clients connected to a kernel. When a client reconnects to a kernel, it should use the same client session id in its message headers. When a client restarts, it should generate a new client session id. A kernel session id, in message headers from a kernel, should identify a particular kernel process. If a kernel is restarted, the kernel session id should be regenerated. The session id in a message header can be used to identify the sending entity. For example, if a client disconnects and reconnects to a kernel, and messages from the kernel have a different kernel session id than prior to the disconnect, the client should assume that the kernel was restarted. .. versionchanged:: 5.0 ``version`` key added to the header. .. versionchanged:: 5.1 ``date`` in the header was accidentally omitted from the spec prior to 5.1, but it has always been in the canonical implementation, so implementers are strongly encouraged to include it. It will be mandatory in 5.1. Parent header ------------- When a message is the "result" of another message, such as a side-effect (output or status) or direct reply, the ``parent_header`` is a copy of the ``header`` of the message that "caused" the current message. ``_reply`` messages MUST have a ``parent_header``, and side-effects *typically* have a parent. If there is no parent, an empty dict should be used. This parent is used by clients to route message handling to the right place, such as outputs to a cell. .. sourcecode:: { # parent_header is a copy of the request's header 'msg_id': '...', ... } Metadata -------- The ``metadata`` dict contains information about the message that is not part of the content. This is not often used, but can be an extra location to store information about requests and replies, such as extensions adding information about request or execution context. Content ------- The ``content`` dict is the body of the message. Its structure is dictated by the ``msg_type`` field in the header, described in detail for each message below. Buffers ------- Finally, a list of additional binary buffers can be associated with a message. While this is part of the protocol, no official messages make use of these buffers. They are used by extension messages, such as IPython Parallel's ``apply`` and some of ipywidgets' ``comm`` messages. A full message -------------- Combining all of these together, a complete message can be represented as the following dictionary of dictionaries (and one list):: { "header" : { "msg_id": "...", "msg_type": "...", ... }, "parent_header": {}, "metadata": {}, "content": {}, "buffers": [], } .. note:: This dictionary structure is *not* part of the Jupyter protocol that must be implemented by kernels and frontends; that would be :ref:`wire_protocol`, which dictates how this information is serialized over the wire. Deserialization is up to the Kernel or frontend implementation, but a dict like this would be a logical choice in most contexts. .. _msging_compatibility: Compatibility ============= Kernels must implement the :ref:`execute ` and :ref:`kernel info ` messages, along with the associated busy and idle :ref:`status` messages. All other message types are optional, although we recommend implementing :ref:`completion ` if possible. Kernels do not need to send any reply for messages they don't handle, and frontends should provide sensible behaviour if no reply arrives (except for the required execution and kernel info messages). :ref:`stdin messages ` are unique in that the request comes from the kernel, and the reply from the frontend. The frontend is not required to support this, but if it does not, it must set ``'allow_stdin' : False`` in its :ref:`execute requests `. In this case, the kernel may not send stdin requests. If that field is true, the kernel may send stdin requests and block waiting for a reply, so the frontend must answer. Both sides should allow unexpected message types, and extra fields in known message types, so that additions to the protocol do not break existing code. .. _wire_protocol: The Wire Protocol ================= The above message format is only a logical representation of the contents of Jupyter messages, but does not describe the actual *implementation* at the wire level in zeromq. This section describes the protocol that must be implemented by Jupyter kernels and clients talking to each other over zeromq. The reference implementation of the message spec is our :class:`~jupyter_client.session.Session` class. .. note:: This section should only be relevant to non-Python consumers of the protocol. Python consumers should import and the use implementation of the wire protocol in :class:`jupyter_client.session.Session`. Every message is serialized to a sequence of at least six blobs of bytes: .. sourcecode:: python [ b"u-u-i-d", # zmq identity(ies) b"", # delimiter b"baddad42", # HMAC signature b"{header}", # serialized header dict b"{parent_header}", # serialized parent header dict b"{metadata}", # serialized metadata dict b"{content}", # serialized content dict b"\xf0\x9f\x90\xb1" # extra raw data buffer(s) # ... ] The front of the message is the ZeroMQ routing prefix, which can be zero or more socket identities. This is every piece of the message prior to the delimiter key ````. In the case of IOPub, there should be just one prefix component, which is the topic for IOPub subscribers, e.g. ``execute_result``, ``display_data``. .. note:: In most cases, the IOPub topics are irrelevant and completely ignored, because frontends just subscribe to all topics. The convention used in the IPython kernel is to use the msg_type as the topic, and possibly extra information about the message, e.g. ``kernel.{u-u-i-d}.execute_result`` or ``stream.stdout`` After the delimiter is the `HMAC`_ signature of the message, used for authentication. If authentication is disabled, this should be an empty string. By default, the hashing function used for computing these signatures is sha256. .. _HMAC: https://en.wikipedia.org/wiki/HMAC .. note:: To disable authentication and signature checking, set the ``key`` field of a connection file to an empty string. The signature is the HMAC hex digest of the concatenation of: - A shared key (typically the ``key`` field of a connection file) - The serialized header dict - The serialized parent header dict - The serialized metadata dict - The serialized content dict In Python, this is implemented via: .. sourcecode:: python # once: digester = HMAC(key, digestmod=hashlib.sha256) # for each message d = digester.copy() for serialized_dict in (header, parent, metadata, content): d.update(serialized_dict) signature = d.hexdigest() After the signature is the actual message, always in four frames of bytes. The four dictionaries that compose a message are serialized separately, in the order of header, parent header, metadata, and content. These can be serialized by any function that turns a dict into bytes. The default and most common serialization is JSON, but msgpack and pickle are common alternatives. After the serialized dicts are zero to many raw data buffers, which can be used by message types that support binary data, which can be used in custom messages, such as comms and extensions to the protocol. Python API ========== As messages can be represented as dicts, they map naturally to a ``func(**kw)`` call form. We should develop, at a few key points, functional forms of all the requests that take arguments in this manner and automatically construct the necessary dict for sending. In addition, the Python implementation of the message specification extends messages upon deserialization to the following form for convenience:: { 'header' : dict, # The msg's unique identifier and type are always stored in the header, # but the Python implementation copies them to the top level. 'msg_id' : str, 'msg_type' : str, 'parent_header' : dict, 'content' : dict, 'metadata' : dict, 'buffers': list, } All messages sent to or received by any IPython message handler should have this extended structure. Messages on the shell (ROUTER/DEALER) channel ============================================= .. _request_reply: Request-Reply ------------- In general, the ROUTER/DEALER sockets follow a request-reply pattern: The client sends an ``_request`` message (such as ``execute_request``) on its shell (DEALER) socket. The kernel receives that request and immediately publishes a ``status: busy`` message on IOPub. The kernel then processes the request and sends the appropriate ``_reply`` message, such as ``execute_reply``. After processing the request and publishing associated IOPub messages, if any, the kernel publishes a ``status: idle`` message. This idle status message indicates that IOPub messages associated with a given request have all been received. All reply messages have a ``'status'`` field, which will have one of the following values: - ``status='ok'``: The request was processed successfully, and the remaining content of the reply is specified in the appropriate section below. - ``status='error'``: The request failed due to an error. When status is 'error', the usual content of a successful reply should be omitted, instead the following fields should be present:: { 'status' : 'error', 'ename' : str, # Exception name, as a string 'evalue' : str, # Exception value, as a string 'traceback' : list(str), # traceback frames as strings } - ``status='abort'``: This is the same as ``status='error'`` but with no information about the error. No fields should be present other that ``status``. As a special case, ``execute_reply`` messages (see :ref:`execution_results`) have an ``execution_count`` field regardless of their status. .. versionchanged:: 5.1 ``status='abort'`` has not proved useful, and is considered deprecated. Kernels should send ``status='error'`` instead. .. _execute: Execute ------- This message type is used by frontends to ask the kernel to execute code on behalf of the user, in a namespace reserved to the user's variables (and thus separate from the kernel's own internal code and variables). Message type: ``execute_request``:: content = { # Source code to be executed by the kernel, one or more lines. 'code' : str, # A boolean flag which, if True, signals the kernel to execute # this code as quietly as possible. # silent=True forces store_history to be False, # and will *not*: # - broadcast output on the IOPUB channel # - have an execute_result # The default is False. 'silent' : bool, # A boolean flag which, if True, signals the kernel to populate history # The default is True if silent is False. If silent is True, store_history # is forced to be False. 'store_history' : bool, # A dict mapping names to expressions to be evaluated in the # user's dict. The rich display-data representation of each will be evaluated after execution. # See the display_data content for the structure of the representation data. 'user_expressions' : dict, # Some frontends do not support stdin requests. # If this is true, code running in the kernel can prompt the user for input # with an input_request message (see below). If it is false, the kernel # should not send these messages. 'allow_stdin' : True, # A boolean flag, which, if True, aborts the execution queue if an exception is encountered. # If False, queued execute_requests will execute even if this request generates an exception. 'stop_on_error' : True, } .. versionchanged:: 5.0 ``user_variables`` removed, because it is redundant with user_expressions. The ``code`` field contains a single string (possibly multiline) to be executed. The ``user_expressions`` field deserves a detailed explanation. In the past, IPython had the notion of a prompt string that allowed arbitrary code to be evaluated, and this was put to good use by many in creating prompts that displayed system status, path information, and even more esoteric uses like remote instrument status acquired over the network. But now that IPython has a clean separation between the kernel and the clients, the kernel has no prompt knowledge; prompts are a frontend feature, and it should be even possible for different frontends to display different prompts while interacting with the same kernel. ``user_expressions`` can be used to retrieve this information. Any error in evaluating any expression in ``user_expressions`` will result in only that key containing a standard error message, of the form:: { 'status' : 'error', 'ename' : 'NameError', 'evalue' : 'foo', 'traceback' : ... } .. Note:: In order to obtain the current execution counter for the purposes of displaying input prompts, frontends may make an execution request with an empty code string and ``silent=True``. Upon completion of the execution request, the kernel *always* sends a reply, with a status code indicating what happened and additional data depending on the outcome. See :ref:`below ` for the possible return codes and associated data. .. seealso:: :ref:`execution_semantics` .. _execution_counter: Execution counter (prompt number) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The kernel should have a single, monotonically increasing counter of all execution requests that are made with ``store_history=True``. This counter is used to populate the ``In[n]`` and ``Out[n]`` prompts. The value of this counter will be returned as the ``execution_count`` field of all ``execute_reply`` and ``execute_input`` messages. .. _execution_results: Execution results ~~~~~~~~~~~~~~~~~ Message type: ``execute_reply``:: content = { # One of: 'ok' OR 'error' OR 'aborted' 'status' : str, # The global kernel counter that increases by one with each request that # stores history. This will typically be used by clients to display # prompt numbers to the user. If the request did not store history, this will # be the current value of the counter in the kernel. 'execution_count' : int, } When status is 'ok', the following extra fields are present:: { # 'payload' will be a list of payload dicts, and is optional. # payloads are considered deprecated. # The only requirement of each payload dict is that it have a 'source' key, # which is a string classifying the payload (e.g. 'page'). 'payload' : list(dict), # Results for the user_expressions. 'user_expressions' : dict, } .. versionchanged:: 5.0 ``user_variables`` is removed, use user_expressions instead. Payloads (DEPRECATED) ~~~~~~~~~~~~~~~~~~~~~ .. admonition:: Execution payloads Payloads are considered **deprecated**, though their replacement is not yet implemented. Payloads are a way to trigger frontend actions from the kernel. Current payloads: **page**: display data in a pager. Pager output is used for introspection, or other displayed information that's not considered output. Pager payloads are generally displayed in a separate pane, that can be viewed alongside code, and are not included in notebook documents. .. sourcecode:: python { "source": "page", # mime-bundle of data to display in the pager. # Must include text/plain. "data": mimebundle, # line offset to start from "start": int, } **set_next_input**: create a new output used to create new cells in the notebook, or set the next input in a console interface. The main example being ``%load``. .. sourcecode:: python { "source": "set_next_input", # the text contents of the cell to create "text": "some cell content", # If true, replace the current cell in document UIs instead of inserting # a cell. Ignored in console UIs. "replace": bool, } **edit_magic**: open a file for editing. Triggered by ``%edit``. Only the QtConsole currently supports edit payloads. .. sourcecode:: python { "source": "edit_magic", "filename": "/path/to/file.py", # the file to edit "line_number": int, # the line number to start with } **ask_exit**: instruct the frontend to prompt the user for exit Allows the kernel to request exit, e.g. via ``%exit`` in IPython. Only for console frontends. .. sourcecode:: python { "source": "ask_exit", # whether the kernel should be left running, only closing the client "keepkernel": bool, } .. _msging_inspection: Introspection ------------- Code can be inspected to show useful information to the user. It is up to the Kernel to decide what information should be displayed, and its formatting. Message type: ``inspect_request``:: content = { # The code context in which introspection is requested # this may be up to an entire multiline cell. 'code' : str, # The cursor position within 'code' (in unicode characters) where inspection is requested 'cursor_pos' : int, # The level of detail desired. In IPython, the default (0) is equivalent to typing # 'x?' at the prompt, 1 is equivalent to 'x??'. # The difference is up to kernels, but in IPython level 1 includes the source code # if available. 'detail_level' : 0 or 1, } .. versionchanged:: 5.0 ``object_info_request`` renamed to ``inspect_request``. .. versionchanged:: 5.0 ``name`` key replaced with ``code`` and ``cursor_pos``, moving the lexing responsibility to the kernel. .. versionchanged:: 5.2 Due to a widespread bug in many frontends, ``cursor_pos`` in versions prior to 5.2 is ambiguous in the presence of "astral-plane" characters. In 5.2, cursor_pos **must be** the actual encoding-independent offset in unicode codepoints. See :ref:`cursor_pos_unicode_note` for more. The reply is a mime-bundle, like a `display_data`_ message, which should be a formatted representation of information about the context. In the notebook, this is used to show tooltips over function calls, etc. Message type: ``inspect_reply``:: content = { # 'ok' if the request succeeded or 'error', with error information as in all other replies. 'status' : 'ok', # found should be true if an object was found, false otherwise 'found' : bool, # data can be empty if nothing is found 'data' : dict, 'metadata' : dict, } .. versionchanged:: 5.0 ``object_info_reply`` renamed to ``inspect_reply``. .. versionchanged:: 5.0 Reply is changed from structured data to a mime bundle, allowing formatting decisions to be made by the kernel. .. _msging_completion: Completion ---------- Message type: ``complete_request``:: content = { # The code context in which completion is requested # this may be up to an entire multiline cell, such as # 'foo = a.isal' 'code' : str, # The cursor position within 'code' (in unicode characters) where completion is requested 'cursor_pos' : int, } .. versionchanged:: 5.0 ``line``, ``block``, and ``text`` keys are removed in favor of a single ``code`` for context. Lexing is up to the kernel. .. versionchanged:: 5.2 Due to a widespread bug in many frontends, ``cursor_pos`` in versions prior to 5.2 is ambiguous in the presence of "astral-plane" characters. In 5.2, cursor_pos **must be** the actual encoding-independent offset in unicode codepoints. See :ref:`cursor_pos_unicode_note` for more. Message type: ``complete_reply``:: content = { # status should be 'ok' unless an exception was raised during the request, # in which case it should be 'error', along with the usual error message content # in other messages. 'status' : 'ok' # The list of all matches to the completion request, such as # ['a.isalnum', 'a.isalpha'] for the above example. 'matches' : list, # The range of text that should be replaced by the above matches when a completion is accepted. # typically cursor_end is the same as cursor_pos in the request. 'cursor_start' : int, 'cursor_end' : int, # Information that frontend plugins might use for extra display information about completions. 'metadata' : dict, } .. versionchanged:: 5.0 - ``matched_text`` is removed in favor of ``cursor_start`` and ``cursor_end``. - ``metadata`` is added for extended information. .. _msging_history: History ------- For clients to explicitly request history from a kernel. The kernel has all the actual execution history stored in a single location, so clients can request it from the kernel when needed. Message type: ``history_request``:: content = { # If True, also return output history in the resulting dict. 'output' : bool, # If True, return the raw input history, else the transformed input. 'raw' : bool, # So far, this can be 'range', 'tail' or 'search'. 'hist_access_type' : str, # If hist_access_type is 'range', get a range of input cells. session # is a number counting up each time the kernel starts; you can give # a positive session number, or a negative number to count back from # the current session. 'session' : int, # start and stop are line (cell) numbers within that session. 'start' : int, 'stop' : int, # If hist_access_type is 'tail' or 'search', get the last n cells. 'n' : int, # If hist_access_type is 'search', get cells matching the specified glob # pattern (with * and ? as wildcards). 'pattern' : str, # If hist_access_type is 'search' and unique is true, do not # include duplicated history. Default is false. 'unique' : bool, } .. versionadded:: 4.0 The key ``unique`` for ``history_request``. Message type: ``history_reply``:: content = { # 'ok' if the request succeeded or 'error', with error information as in all other replies. 'status' : 'ok', # A list of 3 tuples, either: # (session, line_number, input) or # (session, line_number, (input, output)), # depending on whether output was False or True, respectively. 'history' : list, } .. note:: Most of the history messaging options are not used by Jupyter frontends, and many kernels do not implement them. If you're implementing these messages in a kernel, the 'tail' request is the most useful; this is used by the Qt console, for example. The notebook interface does not use history messages at all. This interface was designed by exposing all the main options of IPython's history interface. We may remove some options in a future version of the message spec. .. _msging_is_complete: Code completeness ----------------- .. versionadded:: 5.0 When the user enters a line in a console style interface, the console must decide whether to immediately execute the current code, or whether to show a continuation prompt for further input. For instance, in Python ``a = 5`` would be executed immediately, while ``for i in range(5):`` would expect further input. There are four possible replies: - *complete* code is ready to be executed - *incomplete* code should prompt for another line - *invalid* code will typically be sent for execution, so that the user sees the error soonest. - *unknown* - if the kernel is not able to determine this. The frontend should also handle the kernel not replying promptly. It may default to sending the code for execution, or it may implement simple fallback heuristics for whether to execute the code (e.g. execute after a blank line). Frontends may have ways to override this, forcing the code to be sent for execution or forcing a continuation prompt. Message type: ``is_complete_request``:: content = { # The code entered so far as a multiline string 'code' : str, } Message type: ``is_complete_reply``:: content = { # One of 'complete', 'incomplete', 'invalid', 'unknown' 'status' : str, # If status is 'incomplete', indent should contain the characters to use # to indent the next line. This is only a hint: frontends may ignore it # and use their own autoindentation rules. For other statuses, this # field does not exist. 'indent': str, } Connect ------- .. deprecated:: 5.1 connect_request/reply have not proved useful, and are considered deprecated. Kernels are not expected to implement handlers for this message. When a client connects to the request/reply socket of the kernel, it can issue a connect request to get basic information about the kernel, such as the ports the other ZeroMQ sockets are listening on. This allows clients to only have to know about a single port (the shell channel) to connect to a kernel. The ports for any additional channels the kernel is listening on should be included in the reply. If any ports are omitted from the reply, this indicates that the channels are not running. Message type: ``connect_request``:: content = {} For example, a kernel with all channels running: Message type: ``connect_reply``:: content = { 'shell_port' : int, # The port the shell ROUTER socket is listening on. 'iopub_port' : int, # The port the PUB socket is listening on. 'stdin_port' : int, # The port the stdin ROUTER socket is listening on. 'hb_port' : int, # The port the heartbeat socket is listening on. 'control_port' : int, # The port the control ROUTER socket is listening on. } .. _msging_comm_info: Comm info --------- When a client needs the currently open comms in the kernel, it can issue a request for the currently open comms. When the optional ``target_name`` is specified, the reply only contains the currently open comms for the target. Message type: ``comm_info_request``:: content = { # Optional, the target name 'target_name': str, } Message type: ``comm_info_reply``:: content = { # 'ok' if the request succeeded or 'error', with error information as in all other replies. 'status' : 'ok', # A dictionary of the comms, indexed by uuids. 'comms': { comm_id: { 'target_name': str, }, }, } .. versionadded:: 5.1 .. _msging_kernel_info: Kernel info ----------- If a client needs to know information about the kernel, it can make a request of the kernel's information. This message can be used to fetch core information of the kernel, including language (e.g., Python), language version number and IPython version number, and the IPython message spec version number. Message type: ``kernel_info_request``:: content = { } Message type: ``kernel_info_reply``:: content = { # 'ok' if the request succeeded or 'error', with error information as in all other replies. 'status' : 'ok', # Version of messaging protocol. # The first integer indicates major version. It is incremented when # there is any backward incompatible change. # The second integer indicates minor version. It is incremented when # there is any backward compatible change. 'protocol_version': 'X.Y.Z', # The kernel implementation name # (e.g. 'ipython' for the IPython kernel) 'implementation': str, # Implementation version number. # The version number of the kernel's implementation # (e.g. IPython.__version__ for the IPython kernel) 'implementation_version': 'X.Y.Z', # Information about the language of code for the kernel 'language_info': { # Name of the programming language that the kernel implements. # Kernel included in IPython returns 'python'. 'name': str, # Language version number. # It is Python version number (e.g., '2.7.3') for the kernel # included in IPython. 'version': 'X.Y.Z', # mimetype for script files in this language 'mimetype': str, # Extension including the dot, e.g. '.py' 'file_extension': str, # Pygments lexer, for highlighting # Only needed if it differs from the 'name' field. 'pygments_lexer': str, # Codemirror mode, for highlighting in the notebook. # Only needed if it differs from the 'name' field. 'codemirror_mode': str or dict, # Nbconvert exporter, if notebooks written with this kernel should # be exported with something other than the general 'script' # exporter. 'nbconvert_exporter': str, }, # A banner of information about the kernel, # which may be displayed in console environments. 'banner': str, # A boolean flag which tells if the kernel supports debugging in the notebook. # Default is False 'debugger': bool, # Optional: A list of dictionaries, each with keys 'text' and 'url'. # These will be displayed in the help menu in the notebook UI. 'help_links': [ {'text': str, 'url': str} ], } Refer to the lists of available `Pygments lexers `_ and `codemirror modes `_ for those fields. .. versionchanged:: 5.0 Versions changed from lists of integers to strings. .. versionchanged:: 5.0 ``ipython_version`` is removed. .. versionchanged:: 5.0 ``language_info``, ``implementation``, ``implementation_version``, ``banner`` and ``help_links`` keys are added. .. versionchanged:: 5.0 ``language_version`` moved to ``language_info.version`` .. versionchanged:: 5.0 ``language`` moved to ``language_info.name`` Messages on the Control (ROUTER/DEALER) channel =============================================== .. _msging_shutdown: Kernel shutdown --------------- The clients can request the kernel to shut itself down; this is used in multiple cases: - when the user chooses to close the client application via a menu or window control. - when the user types 'exit' or 'quit' (or their uppercase magic equivalents). - when the user chooses a GUI method (like the 'Ctrl-C' shortcut in the IPythonQt client) to force a kernel restart to get a clean kernel without losing client-side state like history or inlined figures. Implementation recommendation for starting kernels: A restart should optimally preserve as many resources outside the kernel as possible (e.g. only restart the kernel and its subprocesses and not any parent processes). That is, ideally a restart should be "in-place". For local kernels, there is typically no parent process so a "hard" restart and an in-place restart are identical whereas for remote kernels this is not generally the same. As an example, if a remote kernel is run in a container, during an in-place restart the container may be kept running and a new kernel process within it would be started. The client sends a shutdown request to the kernel, and once it receives the reply message (which is otherwise empty), it can assume that the kernel has completed shutdown safely. The request is sent on the ``control`` channel. Upon their own shutdown, client applications will typically execute a last minute sanity check and forcefully terminate any kernel that is still alive, to avoid leaving stray processes in the user's machine. Message type: ``shutdown_request``:: content = { 'restart' : bool # False if final shutdown, or True if shutdown precedes a restart } Message type: ``shutdown_reply``:: content = { # 'ok' if the request succeeded or 'error', with error information as in all other replies. 'status' : 'ok', 'restart' : bool # False if final shutdown, or True if shutdown precedes a restart } .. Note:: When the clients detect a dead kernel thanks to inactivity on the heartbeat socket, they simply send a forceful process termination signal, since a dead process is unlikely to respond in any useful way to messages. .. versionchanged:: 5.4 Sending a ``shutdown_request`` message on the ``shell`` channel is deprecated. .. _msging_interrupt: Kernel interrupt ---------------- In case a kernel can not catch operating system interrupt signals (e.g. the used runtime handles signals and does not allow a user program to define a callback), a kernel can choose to be notified using a message instead. For this to work, the kernels kernelspec must set ``interrupt_mode`` to ``message``. An interruption will then result in the following message on the ``control`` channel: Message type: ``interrupt_request``:: content = {} Message type: ``interrupt_reply``:: content = { # 'ok' if the request succeeded or 'error', with error information as in all other replies. 'status' : 'ok' } .. versionadded:: 5.3 Debug request ------------- This message type is used with debugging kernels to request specific actions to be performed by the debugger such as adding a breakpoint or stepping into a code. Message type: ``debug_request``:: content = {} Message type: ``debug_reply``:: content = {} The ``content`` dicts of the ``debug_request`` and ``debug_reply`` messages respectively follow the specification of the ``Request`` and ``Response`` messages from the `Debug Adapter Protocol (DAP) `_ as of version 1.39 or later. Debug requests and replies are sent over the ``control`` channel to prevent queuing behind execution requests. Additions to the DAP ~~~~~~~~~~~~~~~~~~~~ The Jupyter debugger protocol makes several additions to the DAP: - the `dumpCell`_ request and response messages - the `debugInfo`_ request and response messages - the `inspectVariables`_ request and response messages - the `richInspectVariables`_ request and response messages - the `copyToGlobals`_ request and response messages dumpCell ######## In order to support the debugging of notebook cells and of Jupyter consoles, which are not based on source files, we need a message to submit code to the debugger to which breakpoints can be added. Content of the ``dumpCell`` request:: { 'type' : 'request', 'command' : 'dumpCell', 'arguments' : { 'code' : str # the content of the cell being submitted. } } Content of the ``dumpCell`` response:: { 'type' : 'response', 'success': bool, 'body': { 'sourcePath': str # filename for the dumped source } } debugInfo ######### In order to support page reloading, or a client connecting at a later stage, Jupyter kernels must store the state of the debugger (such as breakpoints, whether the debugger is currently stopped). The ``debugInfo`` request is a DAP ``Request`` with no extra argument. Content of the ``debugInfo`` request:: { 'type' : 'request', 'command' : 'debugInfo' } Content of the ``debugInfo`` response:: { 'type' : 'response', 'success' : bool, 'body' : { 'isStarted' : bool, # whether the debugger is started, 'hashMethod' : str, # the hash method for code cell. Default is 'Murmur2', 'hashSeed' : str, # the seed for the hashing of code cells, 'tmpFilePrefix' : str, # prefix for temporary file names 'tmpFileSuffix' : str, # suffix for temporary file names 'breakpoints' : [ # breakpoints currently registered in the debugger. { 'source' : str, # source file 'breakpoints' : list(source_breakpoints) # list of breakpoints for that source file } ], 'stoppedThreads' : list(int), # threads in which the debugger is currently in a stopped state 'richRendering' : bool, # whether the debugger supports rich rendering of variables 'exceptionPaths' : list(str), # exception names used to match leaves or nodes in a tree of exception } } The ``source_breakpoint`` schema is specified by the Debug Adapter Protocol. inspectVariables ################ The ``inspectVariables`` is meant to retrieve the values of all the variables that have been defined in the kernel. It is a DAP ``Request`` with no extra argument. Content of the ``inspectVariables`` request:: { 'type' : 'request', 'command' : 'inspectVariables' } Content of the ``inspectVariables`` response:: { 'type' : 'response', 'success' : bool, 'body' : { 'variables' : [ # variables defined in the notebook. { 'name' : str, 'variablesReference' : int, 'value' : str, 'type' : str } ] } } richInspectVariables #################### The ``richInspectVariables`` request allows to get the rich representation of a variable that has been defined in the kernel. Content of the ``richInspectVariables`` request:: { 'type' : 'request', 'command' : 'richInspectVariables', 'arguments' : { 'variableName' : str, # The frameId is used when the debugger hit a breakpoint only. 'frameId' : int } } Content of the ``richInspectVariables`` response:: { 'type' : 'response', 'success' : bool, 'body' : { # Dictionary of rich representations of the variable 'data' : dict, 'metadata' : dict } } copyToGlobals ############# The ``copyToGlobals`` request allows to copy a variable from the local variable panel of the debugger to the ``global`` scope to inspect it after debug session. Content of the ``copyToGlobals`` request:: { 'type': 'request', 'command': 'copyToGlobals', 'arguments': { # the variable to copy from the frame corresponding to `srcFrameId` 'srcVariableName': str, 'srcFrameId': int, # the copied variable name in the global scope 'dstVariableName': str } } Content of the ``copyToGlobals`` response:: { 'type': 'response', 'success': bool, 'command': 'setExpression', 'body': { # string representation of the copied variable 'value': str, # type of the copied variable 'type': str, 'variablesReference': int } } .. versionadded:: 5.5 Messages on the IOPub (PUB/SUB) channel ======================================= Streams (stdout, stderr, etc) ------------------------------ Message type: ``stream``:: content = { # The name of the stream is one of 'stdout', 'stderr' 'name' : str, # The text is an arbitrary string to be written to that stream 'text' : str, } .. versionchanged:: 5.0 'data' key renamed to 'text' for consistency with the notebook format. Display Data ------------ This type of message is used to bring back data that should be displayed (text, html, svg, etc.) in the frontends. This data is published to all frontends. Each message can have multiple representations of the data; it is up to the frontend to decide which to use and how. A single message should contain all possible representations of the same information. Each representation should be a JSON'able data structure, and should be a valid MIME type. Some questions remain about this design: * Do we use this message type for execute_result/displayhook? Probably not, because the displayhook also has to handle the Out prompt display. On the other hand we could put that information into the metadata section. .. _display_data: Message type: ``display_data``:: content = { # Who create the data # Used in V4. Removed in V5. # 'source' : str, # The data dict contains key/value pairs, where the keys are MIME # types and the values are the raw data of the representation in that # format. 'data' : dict, # Any metadata that describes the data 'metadata' : dict, # Optional transient data introduced in 5.1. Information not to be # persisted to a notebook or other documents. Intended to live only # during a live kernel session. 'transient': dict, } The ``metadata`` contains any metadata that describes the output. Global keys are assumed to apply to the output as a whole. The ``metadata`` dict can also contain mime-type keys, which will be sub-dictionaries, which are interpreted as applying only to output of that type. Third parties should put any data they write into a single dict with a reasonably unique name to avoid conflicts. The only metadata keys currently defined in IPython are the width and height of images:: metadata = { 'image/png' : { 'width': 640, 'height': 480 } } and expanded for JSON data:: metadata = { 'application/json' : { 'expanded': True } } The ``transient`` dict contains runtime metadata that should not be persisted to document formats and is fully optional. The only transient key currently defined in Jupyter is ``display_id``:: transient = { 'display_id': 'abcd' } .. versionchanged:: 5.0 `application/json` data should be unpacked JSON data, not double-serialized as a JSON string. .. versionchanged:: 5.1 `transient` is a new field. Update Display Data ------------------- .. versionadded:: 5.1 Displays can now be named with a ``display_id`` within the ``transient`` field of ``display_data`` or ``execute_result``. When a ``display_id`` is specified for a display, it can be updated later with an ``update_display_data`` message. This message has the same format as `display_data`_ messages and must contain a ``transient`` field with a ``display_id``. .. _update_display_data: Message type: ``update_display_data``:: content = { # The data dict contains key/value pairs, where the keys are MIME # types and the values are the raw data of the representation in that # format. 'data' : dict, # Any metadata that describes the data 'metadata' : dict, # Any information not to be persisted to a notebook or other environment # Intended to live only during a kernel session 'transient': dict, } Frontends can choose how they update prior outputs (or if they regard this as a regular ``display_data`` message). Within the jupyter and nteract_ notebooks, all displays that match the ``display_id`` are updated (even if there are multiple). Code inputs ----------- To let all frontends know what code is being executed at any given time, these messages contain a re-broadcast of the ``code`` portion of an :ref:`execute_request `, along with the :ref:`execution_count `. Message type: ``execute_input``:: content = { 'code' : str, # Source code to be executed, one or more lines # The counter for this execution is also provided so that clients can # display it, since IPython automatically creates variables called _iN # (for input prompt In[N]). 'execution_count' : int } .. versionchanged:: 5.0 ``pyin`` is renamed to ``execute_input``. Execution results ----------------- Results of an execution are published as an ``execute_result``. These are identical to `display_data`_ messages, with the addition of an ``execution_count`` key. Results can have multiple simultaneous formats depending on its configuration. A plain text representation should always be provided in the ``text/plain`` mime-type. Frontends are free to display any or all of these according to its capabilities. Frontends should ignore mime-types they do not understand. The data itself is any JSON object and depends on the format. It is often, but not always a string. Message type: ``execute_result``:: content = { # The counter for this execution is also provided so that clients can # display it, since IPython automatically creates variables called _N # (for prompt N). 'execution_count' : int, # data and metadata are identical to a display_data message. # the object being displayed is that passed to the display hook, # i.e. the *result* of the execution. 'data' : dict, 'metadata' : dict, } Execution errors ---------------- When an error occurs during code execution Message type: ``error``:: content = { # Similar content to the execute_reply messages for the 'error' case, # except the 'status' and 'execution_count' fields are omitted. } .. versionchanged:: 5.0 ``pyerr`` renamed to ``error`` .. _status: Kernel status ------------- This message type is used by frontends to monitor the status of the kernel. Message type: ``status``:: content = { # When the kernel starts to handle a message, it will enter the 'busy' # state and when it finishes, it will enter the 'idle' state. # The kernel will publish state 'starting' exactly once at process startup. execution_state : ('busy', 'idle', 'starting') } When a kernel receives a request and begins processing it, the kernel shall immediately publish a status message with ``execution_state: 'busy'``. When that kernel has completed processing the request and has finished publishing associated IOPub messages, if any, it shall publish a status message with ``execution_state: 'idle'``. Thus, the outputs associated with a given execution shall generally arrive between the busy and idle status messages associated with a given request. .. note:: **A caveat for asynchronous output** Asynchronous output (e.g. from background threads) may be produced after the kernel has sent the idle status message that signals the completion of the request. The handling of these out-of-order output messages is currently undefined in this specification, but the Jupyter Notebook continues to handle IOPub messages associated with a given request after the idle message has arrived, as long as the output area corresponding to that request is still active. .. versionchanged:: 5.0 Busy and idle messages should be sent before/after handling every request, not just execution. Clear output ------------ This message type is used to clear the output that is visible on the frontend. Message type: ``clear_output``:: content = { # Wait to clear the output until new output is available. Clears the # existing output immediately before the new output is displayed. # Useful for creating simple animations with minimal flickering. 'wait' : bool, } .. versionchanged:: 4.1 ``stdout``, ``stderr``, and ``display`` boolean keys for selective clearing are removed, and ``wait`` is added. The selective clearing keys are ignored in v4 and the default behavior remains the same, so v4 clear_output messages will be safely handled by a v4.1 frontend. .. _debug_event: Debug event ----------- This message type is used by debugging kernels to send debugging events to the frontend. Message type: ``debug_event``:: content = {} The ``content`` dict follows the specification of the ``Event`` message from the `Debug Adapter Protocol (DAP) `_. .. versionadded:: 5.5 .. _stdin_messages: Messages on the stdin (ROUTER/DEALER) channel ============================================= With the stdin ROUTER/DEALER socket, the request/reply pattern goes in the opposite direction of most kernel communication. With the stdin socket, the kernel makes the request, and the single frontend provides the response. This pattern allows code to prompt the user for a line of input, which would normally be read from stdin in a terminal. Many programming languages provide a function which displays a prompt, blocks until the user presses return, and returns the text they typed before pressing return. In Python 3, this is the ``input()`` function; in R it is called ``readline()``. If the :ref:`execute_request ` message has ``allow_stdin==True``, kernels may implement these functions so that they send an ``input_request`` message and wait for a corresponding ``input_reply``. The frontend is responsible for displaying the prompt and getting the user's input. If ``allow_stdin`` is False, the kernel must not send ``stdin_request``. The kernel may decide what to do instead, but it's most likely that calls to the 'prompt for input' function should fail immediately in this case. Message type: ``input_request``:: content = { # the text to show at the prompt 'prompt' : str, # Is the request for a password? # If so, the frontend shouldn't echo input. 'password' : bool } Message type: ``input_reply``:: content = { 'value' : str } When ``password`` is True, the frontend should not show the input as it is entered. Different frontends may obscure it in different ways; e.g. showing each character entered as the same neutral symbol, or not showing anything at all as the user types. .. versionchanged:: 5.0 ``password`` key added. .. note:: The stdin socket of the client is required to have the same zmq IDENTITY as the client's shell socket. Because of this, the ``input_request`` must be sent with the same IDENTITY routing prefix as the ``execute_reply`` in order for the frontend to receive the message. .. note:: This pattern of requesting user input is quite different from how stdin works at a lower level. The Jupyter protocol does not support everything code running in a terminal can do with stdin, but we believe that this enables the most common use cases. .. _kernel_heartbeat: Heartbeat for kernels ===================== Clients send ping messages on a REQ socket, which are echoed right back from the Kernel's REP socket. These are simple bytestrings, not full JSON messages described above. Custom Messages =============== .. versionadded:: 4.1 Message spec 4.1 (IPython 2.0) added a messaging system for developers to add their own objects with Frontend and Kernel-side components, and allow them to communicate with each other. To do this, IPython adds a notion of a ``Comm``, which exists on both sides, and can communicate in either direction. These messages are fully symmetrical - both the Kernel and the Frontend can send each message, and no messages expect a reply. The Kernel listens for these messages on the Shell channel, and the Frontend listens for them on the IOPub channel. Opening a Comm -------------- Opening a Comm produces a ``comm_open`` message, to be sent to the other side:: { 'comm_id' : 'u-u-i-d', 'target_name' : 'my_comm', 'data' : {} } Every Comm has an ID and a target name. The code handling the message on the receiving side is responsible for maintaining a mapping of target_name keys to constructors. After a ``comm_open`` message has been sent, there should be a corresponding Comm instance on both sides. The ``data`` key is always a dict and can be any extra JSON information used in initialization of the comm. If the ``target_name`` key is not found on the receiving side, then it should immediately reply with a ``comm_close`` message to avoid an inconsistent state. Comm Messages ------------- Comm messages are one-way communications to update comm state, used for synchronizing widget state, or simply requesting actions of a comm's counterpart. Essentially, each comm pair defines their own message specification implemented inside the ``data`` dict. There are no expected replies (of course, one side can send another ``comm_msg`` in reply). Message type: ``comm_msg``:: { 'comm_id' : 'u-u-i-d', 'data' : {} } Tearing Down Comms ------------------ Since comms live on both sides, when a comm is destroyed the other side must be notified. This is done with a ``comm_close`` message. Message type: ``comm_close``:: { 'comm_id' : 'u-u-i-d', 'data' : {} } Output Side Effects ------------------- Since comm messages can execute arbitrary user code, handlers should set the parent header and publish status busy / idle, just like an execute request. Changelog ========= 5.5 (draft) ----------- - Added ``debug_request/reply`` messages - Added ``debug_event`` message 5.4 --- - Sending a ``shutdown_request`` message on the ``shell`` channel is deprecated. It should be sent on the control channel. 5.3 --- - Kernels can now opt to be interrupted by a message sent on the control channel instead of a system signal. See :ref:`kernelspecs` and :ref:`msging_interrupt`. 5.2 --- - Resolve ambiguity of ``cursor_pos`` field in the presence of unicode surrogate pairs. In 5.2, cursor_pos **must be** the actual encoding-independent offset in unicode codepoints. .. seealso:: :ref:`cursor_pos_unicode_note` 5.1 --- - ``date`` in the header was accidentally omitted from the spec prior to 5.1, but it has always been in the canonical implementation, so implementers are strongly encouraged to include it. It is mandatory in 5.1. - ``status='abort'`` in replies has not proved useful, and is considered deprecated. Kernels should send ``status='error'`` instead. - ``comm_info_request/reply`` added - ``connect_request/reply`` have not proved useful, and are considered deprecated. Kernels are not expected to implement handlers for this message. - new ``transient`` field in ``display_data`` - new ``update_display_data`` message 5.0 --- General changes: - ``version`` key added to message headers - busy and idle status messages should be sent before/after handling every request, not just execution Message renames to remove Python-specific-ness: - ``pyin`` message renamed to ``execute_input`` - ``pyerr`` renamed to ``error`` - ``object_info_request/reply`` messages renamed to ``inspect_request/reply`` Kernel info: - versions changed from lists of integers to strings - ``ipython_version`` is removed - ``language_info``, ``implementation``, ``implementation_version``, ``banner`` and ``help_links`` keys are added. - ``language_version`` is moved to ``language_info.version`` - ``language`` is moved to ``language_info.name`` Execution: - ``user_variables`` is removed from ``execute_request/reply`` because it is redundant with ``user_expressions`` - ``password`` key added to ``input_request`` Output: - ``data`` key in stream messages renamed to ``text`` for consistency with the notebook format. - ``application/json`` in mimebundles should be unpacked JSON data, not a double-serialized JSON string. Inspection: - ``name`` key in ``inspect_request`` replaced with ``code`` and ``cursor_pos``, moving the lexing responsibility to the kernel. - ``object_info_reply`` is now a mimebundle, allowing formatting decisions to be made by the kernel. Completion: - ``complete_request``: ``line``, ``block``, and ``text`` keys are removed in favor of a single ``code`` for context. Lexing is up to the kernel. - ``complete_reply``: - ``matched_text`` is removed in favor of ``cursor_start`` and ``cursor_end``. - ``metadata`` is added for extended information. - new ``is_complete_request`` and ``is_complete_reply`` messages 4.1 --- - ``comm_open/close/msg`` messages added - ``clear_output``: ``stdout``, ``stderr``, and ``display`` boolean keys for selective clearing are removed, and ``wait`` is added. The selective clearing keys are ignored in v4 and the default behavior remains the same, so v4 ``clear_output`` messages will be safely handled by a v4.1 frontend. Notes ===== .. _cursor_pos_unicode_note: ``cursor_pos`` and unicode offsets ---------------------------------- Many frontends, especially those implemented in javascript, reported cursor_pos as the interpreter's string index, which is not the same as the unicode character offset if the interpreter uses UTF-16 (e.g. javascript or Python 2 on macOS), which stores "astral-plane" characters such as ``𝐚 (U+1D41A)`` as surrogate pairs, taking up two indices instead of one, causing a unicode offset drift of one per astral-plane character. Not all frontends have this behavior, however, and after JSON serialization information about which encoding was used when calculating the offset is lost, so assuming ``cursor_pos`` is calculated in UTF-16 could result in a similarly incorrect offset for frontends that did the right thing. For this reason, in protocol versions prior to 5.2, ``cursor_pos`` is officially ambiguous in the presence of astral plane unicode characters. Frontends claiming to implement protocol 5.2 **MUST** identify cursor_pos as the encoding-independent unicode character offset. Kernels may choose to expect the UTF-16 offset from requests implementing protocol 5.1 and earlier, in order to behave correctly with the most popular frontends. But they should know that doing so *introduces* the inverse bug for the frontends that do not have this bug. As an example, use a python3 kernel and evaluate ``𨭎𨭎𨭎𨭎𨭎 = 10``. Then type ``𨭎𨭎`` followed by the tab key and see if it properly completes. Known affected frontends (as of 2017-06): - Jupyter Notebook < 5.1 - JupyterLab < 0.24 - nteract < 0.2.0 - Jupyter Console and QtConsole with Python 2 on macOS and Windows Known *not* affected frontends: - QtConsole, Jupyter Console with Python 3 or Python 2 on Linux, CoCalc .. seealso:: `Discussion on GitHub `_ .. _ZeroMQ: http://zeromq.org .. _nteract: https://nteract.io jupyter_client-8.6.2/docs/migration.md000066400000000000000000000101031462351563100200330ustar00rootroot00000000000000# Migration Guide ## Jupyter Client 6.0 to 7.0 ### API Changes All of the API changes for `KernelManager` and `AsyncKernelManager` in the 7.0 release were confined to _internal public_ methods, which we define as methods called from the _formally public_ methods but could be overridden in subclass implementations. As a result, these changes may impact subclasses of `KernelManager` or `AsyncKernelManager` provided those implementations also implement or call these methods, but should not affect applications that call only the _formally public_ methods. #### `KernelManager` The following internal methods had signature changes: - `def pre_start_kernel(self, **kwargs) -> Tuple[List[str], Dict[str, Any]]:` - `pre_start_kernel` now returns a tuple consisting of the formatted kernel startup list and an updated set of keyword arguments. - `def _launch_kernel(self, kernel_cmd: List[str], **kw) -> None:` - `_launch_kernel` now returns `None` instead of the `Popen` instance - These methods now take the keyword argument `restart` indicating the shutdown was on behalf of a kernel restart (when `True`). - `def finish_shutdown(self, restart: bool = False):` - `def _kill_kernel(self, restart: bool = False):` - `def _send_kernel_sigterm(self, restart: bool = False):` - Attribute `kernel` has been removed and _logically_ replaced with `provisioner` - which is an instance of `KernelProvisionerBase` and can be viewed as an abstract `Popen` instance. #### `AsyncKernelManager` Besides the signature and attribute changes described above, the following internal methods were made `async` for `AsyncKernelManager`: - `async def pre_start_kernel(self, **kwargs) -> Tuple[List[str], Dict[str, Any]]:` - `async def post_start_kernel(self, **kwargs):` - `async def request_shutdown(self, restart: bool = False):` - `async def cleanup_resources(self, restart: bool = False):` #### `AsyncKernelClient` We dropped the `block: bool = True` keyword argument for the following methods: - `async def get_shell_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:` - `async def get_iopub_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:` - `async def get_stdin_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:` - `async def get_control_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:` Calling these methods with `block=False` previously translates to calling them with `timeout=0` now. Calling these methods with `block=True` previously translates to calling them with a non-zero `timeout` value now. #### `BlockingKernelClient` We dropped the `block: bool = True` keyword argument for the following methods: - `def get_shell_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:` - `def get_iopub_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:` - `def get_stdin_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:` - `def get_control_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:` Calling these methods with `block=False` previously translates to calling them with `timeout=0` now. Calling these methods with `block=True` previously translates to calling them with a non-zero `timeout` value now. #### `BlockingKernelClient` #### `ZMQSocketChannel` We dropped the `block: bool = True` keyword argument for the following method: - `async def get_msg(self, timeout: Optional[float] = None) -> Dict[str, Any]:`: Calling this method with `block=False` previously translates to calling it with `timeout=0` now. Calling this method with `block=True` previously translates to calling it with a non-zero `timeout` value now. ```{admonition} Note Prefer calling e.g. `client.get_shell_msg()` over `client.shell_channel.get_msg()`. ``` ### Deprecations removed #### Method `KernelManager.cleanup()` The `cleanup()` method on `KernelManager` has been removed. `cleanup_resources(restart: bool = False)` should be used. #### Attribute `KernelManager.kernel_cmd` This attribute had been marked for deprecation for 4 years. The command used to start the kernel is derived from the `argv` stanza of the kernel specification file (`kernel.json`). jupyter_client-8.6.2/docs/pending-kernels.rst000066400000000000000000000035261462351563100213520ustar00rootroot00000000000000Pending Kernels =============== *Added in 7.1.0* In scenarios where an kernel takes a long time to start (e.g. kernels running remotely), it can be advantageous to immediately return the kernel's model and ID from key methods like ``.start_kernel()`` and ``.shutdown_kernel()``. The kernel will continue its task without blocking other managerial actions. This intermediate state is called a **"pending kernel"**. How they work ------------- When ``.start_kernel()`` or ``.shutdown_kernel()`` is called, a ``Future`` is created under the ``KernelManager.ready`` property. This property can be awaited anytime to ensure that the kernel moves out of its pending state, e.g.: .. code-block:: python # await a Kernel Manager's `.ready` property to # block further action until the kernel is out # of its pending state. await kernel_manager.ready Once the kernel is finished pending, ``.ready.done()`` will be ``True`` and either 1) ``.ready.result()`` will return ``None`` or 2) ``.ready.exception()`` will return a raised exception Using pending kernels --------------------- The most common way to interact with pending kernels is through the `` MultiKernelManager``—the object that manages a collection of kernels—by setting its ``use_pending_kernels`` trait to ``True``. Pending kernels are "opt-in"; they are not used by default in the ``MultiKernelManager``. When ``use_pending_kernels`` is ``True``, the following changes are made to the ``MultiKernelManager``: 1. ``start_kernel`` and ``stop_kernel`` return immediately while running the pending task in a background thread. 2. The following methods raise a ``RuntimeError`` if a kernel is pending: * ``restart_kernel`` * ``interrupt_kernel`` * ``shutdown_kernel`` 3. ``shutdown_all`` will wait for all pending kernels to become ready before attempting to shut them down. jupyter_client-8.6.2/docs/provisioning.rst000066400000000000000000000356531462351563100210210ustar00rootroot00000000000000.. _provisioning: Customizing the kernel's runtime environment ============================================ Kernel Provisioning ~~~~~~~~~~~~~~~~~~~ Introduced in the 7.0 release, Kernel Provisioning enables the ability for third parties to manage the lifecycle of a kernel's runtime environment. By implementing and configuring a *kernel provisioner*, third parties now have the ability to provision kernels for different environments, typically managed by resource managers like Kubernetes, Hadoop YARN, Slurm, etc. For example, a *Kubernetes Provisioner* would be responsible for launching a kernel within its own Kubernetes pod, communicating the kernel's connection information back to the application (residing in a separate pod), and terminating the pod upon the kernel's termination. In essence, a kernel provisioner is an *abstraction layer* between the ``KernelManager`` and today's kernel *process* (i.e., ``Popen``). The kernel manager and kernel provisioner relationship ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prior to this enhancement, the only extension point for customizing a kernel's behavior could occur by subclassing ``KernelManager``. This proved to be a limitation because the Jupyter framework allows for a single ``KernelManager`` class at any time. While applications could introduce a ``KernelManager`` subclass of their own, that ``KernelManager`` was then tied directly to *that* application and thereby not usable as a ``KernelManager`` in another application. As a result, we consider the ``KernelManager`` class to be an *application-owned entity* upon which application-specific behaviors can be implemented. Kernel provisioners, on the other hand, are contained within the ``KernelManager`` (i.e., a *has-a* relationship) and applications are agnostic as to what *kind* of provisioner is in use other than what is conveyed via the kernel's specification (kernelspec). All kernel interactions still occur via the ``KernelManager`` and ``KernelClient`` classes within ``jupyter_client`` and potentially subclassed by the application. Kernel provisioners are not related in any way to the ``KernelManager`` instance that controls their lifecycle, nor do they have any affinity to the application within which they are used. They merely provide a vehicle by which authors can extend the landscape in which a kernel can reside, while not side-effecting the application. That said, some kernel provisioners may introduce requirements on the application. For example (and completely hypothetically speaking), a ``SlurmProvisioner`` may impose the constraint that the server (``jupyter_client``) resides on an edge node of the Slurm cluster. These kinds of requirements can be mitigated by leveraging applications like `Jupyter Kernel Gateway `_ or `Jupyter Enterprise Gateway `_ where the gateway server resides on the edge node of (or within) the cluster, etc. Discovery ~~~~~~~~~ Kernel provisioning does not alter today's kernel discovery mechanism that utilizes well-known directories of ``kernel.json`` files. Instead, it optionally extends the current ``metadata`` stanza within the ``kernel.json`` to include the specification of the kernel provisioner name, along with an optional ``config`` stanza, consisting of provisioner-specific configuration items. For example, a container-based provisioner will likely need to specify the image name in this section. The important point is that the content of this section is provisioner-specific. .. code:: JSON "metadata": { "kernel_provisioner": { "provisioner_name": "k8s-provisioner", "config": { "image_name": "my_docker_org/kernel:2.1.5", "max_cpus": 4 } } }, Kernel provisioner authors implement their provisioners by deriving from :class:`KernelProvisionerBase` and expose their provisioner for consumption via entry-points: .. code:: 'jupyter_client.kernel_provisioners': [ 'k8s-provisioner = my_package:K8sProvisioner', ], Backwards Compatibility ~~~~~~~~~~~~~~~~~~~~~~~ Prior to this release, no ``kernel.json`` (kernelspec) will contain a provisioner entry, yet the framework is now based on using provisioners. As a result, when a ``kernel_provisioner`` stanza is **not** present in a selected kernelspec, jupyter client will, by default, use the built-in ``LocalProvisioner`` implementation as its provisioner. This provisioner retains today's local kernel functionality. It can also be subclassed for those provisioner authors wanting to extend the functionality of local kernels. The result of launching a kernel in this manner is equivalent to the following stanza existing in the ``kernel.json`` file: .. code:: JSON "metadata": { "kernel_provisioner": { "provisioner_name": "local-provisioner", "config": { } } }, Should a given installation wish to use a *different* provisioner as their "default provisioner" (including subclasses of ``LocalProvisioner``), they can do so by specifying a value for ``KernelProvisionerFactory.default_provisioner_name``. Implementing a custom provisioner ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The impact of Kernel Provisioning is that it enables the ability to implement custom kernel provisioners to manage a kernel's lifecycle within any runtime environment. There are currently two approaches by which that can be accomplished, extending the ``KernelProvisionerBase`` class or extending the built-in class - ``LocalProvisioner``. As more provisioners are introduced, some may be implemented in an abstract sense, from which specific implementations can be authored. Extending ``LocalProvisioner`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you're interested in running kernels locally and yet adjust their behavior, there's a good chance you can simply extend ``LocalProvisioner`` via subclassing. This amounts to deriving from ``LocalProvisioner`` and overriding appropriate methods to provide your custom functionality. In this example, RBACProvisioner will verify whether the current user is in the role meant for this kernel by calling a method implemented within *this* provisioner. If the user is not in the role, an exception will be thrown. .. code:: python class RBACProvisioner(LocalProvisioner): role: str = Unicode(config=True) async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: if not self.user_in_role(self.role): raise PermissionError( f"User is not in role {self.role} and " f"cannot launch this kernel." ) return await super().pre_launch(**kwargs) It is important to note *when* it's necessary to call the superclass in a given method - since the operations it performs may be critical to the kernel's management. As a result, you'll likely need to become familiar with how ``LocalProvisioner`` operates. Extending ``KernelProvisionerBase`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you'd like to launch your kernel in an environment other than the local server, then you will need to consider subclassing :class:`KernelProvisionerBase` directly. This will allow you to implement the various kernel process controls relative to your target environment. For instance, if you wanted to have your kernel hosted in a Hadoop YARN cluster, you will need to implement process-control methods like :meth:`poll` and :meth:`wait` to use the YARN REST API. Or, similarly, a Kubernetes-based provisioner would need to implement the process-control methods using the Kubernetes client API, etc. By modeling the :class:`KernelProvisionerBase` methods after :class:`subprocess.Popen` a natural mapping between today's kernel lifecycle management takes place. This, coupled with the ability to add configuration directly into the ``config:`` stanza of the ``kernel_provisioner`` metadata, allows for things like endpoint address, image names, namespaces, hosts lists, etc. to be specified relative to your kernel provisioner implementation. The ``kernel_id`` corresponding to the launched kernel and used by the kernel manager is now available *prior* to the kernel's launch. This enables provisioners with a unique *key* they can use to discover and control their kernel when launched into resource-managed clusters such as Hadoop YARN or Kubernetes. .. tip:: Use ``kernel_id`` as a discovery mechanism from your provisioner! Here's a prototyped implementation of a couple of the abstract methods of :class:`KernelProvisionerBase` for use in an Hadoop YARN cluster to help illustrate a provisioner's implementation. Note that the built-in implementation of :class:`LocalProvisioner` can also be used as a reference. Notice the internal method ``_get_application_id()``. This method is what the provisioner uses to determine if the YARN application (i.e., the kernel) is still running within the cluster. Although the provisioner doesn't dictate the application id, the application id is discovered via the application *name* which is a function of ``kernel_id``. .. code:: python async def poll(self) -> Optional[int]: """Submitting a new kernel/app to YARN will take a while to be ACCEPTED. Thus application ID will probably not be available immediately for poll. So will regard the application as RUNNING when application ID still in ACCEPTED or SUBMITTED state. :return: None if the application's ID is available and state is ACCEPTED/SUBMITTED/RUNNING. Otherwise 0. """ result = 0 if self._get_application_id(): state = self._query_app_state_by_id(self.application_id) if state in YarnProvisioner.initial_states: result = None return result async def send_signal(self, signum): """Currently only support 0 as poll and other as kill. :param signum :return: """ if signum == 0: return await self.poll() elif signum == signal.SIGKILL: return await self.kill() else: return await super().send_signal(signum) Notice how in some cases we can compose provisioner methods to implement others. For example, since sending a signal number of 0 is tantamount to polling the process, we go ahead and call :meth:`poll` to handle ``signum`` of 0 and :meth:`kill` to handle ``SIGKILL`` requests. Here we see how ``_get_application_id`` uses the ``kernel_id`` to acquire the application id - which is the *primary id* for controlling YARN application lifecycles. Since startup in resource-managed clusters can tend to take much longer than local kernels, you'll typically need a polling or notification mechanism within your provisioner. In addition, your provisioner will be asked by the ``KernelManager`` what is an acceptable startup time. This answer is implemented in the provisioner via the :meth:`get_shutdown_wait_time` method. .. code:: python def _get_application_id(self, ignore_final_states: bool = False) -> str: if not self.application_id: app = self._query_app_by_name(self.kernel_id) state_condition = True if type(app) is dict: state = app.get("state") self.last_known_state = state if ignore_final_states: state_condition = state not in YarnProvisioner.final_states if len(app.get("id", "")) > 0 and state_condition: self.application_id = app["id"] self.log.info( f"ApplicationID: '{app['id']}' assigned for " f"KernelID: '{self.kernel_id}', state: {state}." ) if not self.application_id: self.log.debug( f"ApplicationID not yet assigned for KernelID: " f"'{self.kernel_id}' - retrying..." ) return self.application_id def get_shutdown_wait_time(self, recommended: Optional[float] = 5.0) -> float: if recommended < yarn_shutdown_wait_time: recommended = yarn_shutdown_wait_time self.log.debug( f"{type(self).__name__} shutdown wait time adjusted to " f"{recommended} seconds." ) return recommended Registering your custom provisioner ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Once your custom provisioner has been authored, it needs to be exposed as an `entry point `_. To do this add the following to your ``setup.py`` (or equivalent) in its ``entry_points`` stanza using the group name ``jupyter_client.kernel_provisioners``: :: 'jupyter_client.kernel_provisioners': [ 'rbac-provisioner = acme.rbac.provisioner:RBACProvisioner', ], where: - ``rbac-provisioner`` is the *name* of your provisioner and what will be referenced within the ``kernel.json`` file - ``acme.rbac.provisioner`` identifies the provisioner module name, and - ``RBACProvisioner`` is custom provisioner object name (implementation) that (directly or indirectly) derives from ``KernelProvisionerBase`` Deploying your custom provisioner ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The final step in getting your custom provisioner deployed is to add a ``kernel_provisioner`` stanza to the appropriate ``kernel.json`` files. This can be accomplished manually or programmatically (in which some tooling is implemented to create the appropriate ``kernel.json`` file). In either case, the end result is the same - a ``kernel.json`` file with the appropriate stanza within ``metadata``. The *vision* is that kernel provisioner packages will include an application that creates kernel specifications (i.e., ``kernel.json`` et. al.) pertaining to that provisioner. Following on the previous example of ``RBACProvisioner``, one would find the following ``kernel.json`` file in directory ``/usr/local/share/jupyter/kernels/rbac_kernel``: .. code:: JSON { "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"], "env": {}, "display_name": "RBAC Kernel", "language": "python", "interrupt_mode": "signal", "metadata": { "kernel_provisioner": { "provisioner_name": "rbac-provisioner", "config": { "role": "data_scientist" } } } } Listing available kernel provisioners ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To confirm that your custom provisioner is available for use, the ``jupyter kernelspec`` command has been extended to include a ``provisioners`` sub-command. As a result, running ``jupyter kernelspec provisioners`` will list the available provisioners by name followed by their module and object names (colon-separated): .. code:: bash $ jupyter kernelspec provisioners Available kernel provisioners: local-provisioner jupyter_client.provisioning:LocalProvisioner rbac-provisioner acme.rbac.provisioner:RBACProvisioner jupyter_client-8.6.2/docs/wrapperkernels.rst000066400000000000000000000163421462351563100213310ustar00rootroot00000000000000Making simple Python wrapper kernels ==================================== You can reuse IPython's kernel machinery to easily make new kernels. This is useful for languages that have Python bindings, such as `Hy `_ (see `Calysto Hy `_), or languages where the REPL can be controlled in a tty using `pexpect `_, such as bash. .. seealso:: `bash_kernel `_ A simple kernel for bash, written using this machinery The `Metakernel `_ library makes it easier to write a wrapper kernel that includes a base set of line and cell magics. It also has a ``ProcessKernel`` subclass that makes it easy to write kernels that use ``pexpect``. See `Octave Kernel `_ as an example. If releasing a wrapper kernel as a Python package, see the steps in :ref:`packaging-kernels`. Required steps -------------- Subclass :class:`ipykernel.kernelbase.Kernel`, and implement the following methods and attributes: .. class:: MyKernel .. attribute:: implementation implementation_version banner Information for :ref:`msging_kernel_info` replies. 'Implementation' refers to the kernel (e.g. IPython), rather than the language (e.g. Python). The 'banner' is displayed to the user in console UIs before the first prompt. All of these values are strings. .. attribute:: language_info Language information for :ref:`msging_kernel_info` replies, in a dictionary. This should contain the key ``mimetype`` with the mimetype of code in the target language (e.g. ``'text/x-python'``), the ``name`` of the language being implemented (e.g. ``'python'``), and ``file_extension`` (e.g. ``'.py'``). It may also contain keys ``codemirror_mode`` and ``pygments_lexer`` if they need to differ from :attr:`language`. Other keys may be added to this later. .. method:: do_execute(code, silent, store_history=True, user_expressions=None, allow_stdin=False) Execute user code. :param str code: The code to be executed. :param bool silent: Whether to display output. :param bool store_history: Whether to record this code in history and increase the execution count. If silent is True, this is implicitly False. :param dict user_expressions: Mapping of names to expressions to evaluate after the code has run. You can ignore this if you need to. :param bool allow_stdin: Whether the frontend can provide input on request (e.g. for Python's :func:`raw_input`). Your method should return a dict containing the fields described in :ref:`execution_results`. To display output, it can send messages using :meth:`~ipykernel.kernelbase.Kernel.send_response`. If an error occurs during execution, an message of type `error` should be sent through :meth:`~ipykernel.kernelbase.Kernel.send_response` in addition to an :ref:`execution_results` with an `status` of `error`. See :doc:`messaging` for details of the different message types. .. automethod:: ipykernel.kernelbase.Kernel.send_response To launch your kernel, add this at the end of your module:: if __name__ == '__main__': from ipykernel.kernelapp import IPKernelApp IPKernelApp.launch_instance(kernel_class=MyKernel) Now create a `JSON kernel spec file `_ and install it using ``jupyter kernelspec install ``. Place your kernel module anywhere Python can import it (try current directory for testing). Finally, you can run your kernel using ``jupyter console --kernel ``. Note that ```` in the below example is ``echo``. Example ------- .. seealso:: `echo_kernel `_ A packaged, installable version of the condensed example below. ``echokernel.py`` will simply echo any input it's given to stdout:: from ipykernel.kernelbase import Kernel class EchoKernel(Kernel): implementation = 'Echo' implementation_version = '1.0' language = 'no-op' language_version = '0.1' language_info = { 'name': 'Any text', 'mimetype': 'text/plain', 'file_extension': '.txt', } banner = "Echo kernel - as useful as a parrot" def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): if not silent: stream_content = {'name': 'stdout', 'text': code} self.send_response(self.iopub_socket, 'stream', stream_content) return {'status': 'ok', # The base class increments the execution count 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, } if __name__ == '__main__': from ipykernel.kernelapp import IPKernelApp IPKernelApp.launch_instance(kernel_class=EchoKernel) Here's the Kernel spec ``kernel.json`` file for this:: {"argv":["python","-m","echokernel", "-f", "{connection_file}"], "display_name":"Echo" } Optional steps -------------- You can override a number of other methods to improve the functionality of your kernel. All of these methods should return a dictionary as described in the relevant section of the :doc:`messaging spec `. .. class:: MyCustomKernel .. method:: do_complete(code, cursor_pos) Code completion :param str code: The code already present :param int cursor_pos: The position in the code where completion is requested .. seealso:: :ref:`msging_completion` messages .. method:: do_inspect(code, cursor_pos, detail_level=0) Object introspection :param str code: The code :param int cursor_pos: The position in the code where introspection is requested :param int detail_level: 0 or 1 for more or less detail. In IPython, 1 gets the source code. .. seealso:: :ref:`msging_inspection` messages .. method:: do_history(hist_access_type, output, raw, session=None, start=None, stop=None, n=None, pattern=None, unique=False) History access. Only the relevant parameters for the type of history request concerned will be passed, so your method definition must have defaults for all the arguments shown with defaults here. .. seealso:: :ref:`msging_history` messages .. method:: do_is_complete(code) Is code entered in a console-like interface complete and ready to execute, or should a continuation prompt be shown? :param str code: The code entered so far - possibly multiple lines .. seealso:: :ref:`msging_is_complete` messages .. method:: do_shutdown(restart) Shutdown the kernel. You only need to handle your own clean up - the kernel machinery will take care of cleaning up its own things before stopping. :param bool restart: Whether the kernel will be started again afterwards .. seealso:: :ref:`msging_shutdown` messages jupyter_client-8.6.2/jupyter_client/000077500000000000000000000000001462351563100176355ustar00rootroot00000000000000jupyter_client-8.6.2/jupyter_client/__init__.py000066400000000000000000000010331462351563100217430ustar00rootroot00000000000000"""Client-side implementations of the Jupyter protocol""" from ._version import __version__, protocol_version, protocol_version_info, version_info from .asynchronous import AsyncKernelClient from .blocking import BlockingKernelClient from .client import KernelClient from .connect import * # noqa from .launcher import * # noqa from .manager import AsyncKernelManager, KernelManager, run_kernel from .multikernelmanager import AsyncMultiKernelManager, MultiKernelManager from .provisioning import KernelProvisionerBase, LocalProvisioner jupyter_client-8.6.2/jupyter_client/_version.py000066400000000000000000000011011462351563100220240ustar00rootroot00000000000000"""The version information for jupyter client.""" import re from typing import List, Union __version__ = "8.6.2" # Build up version_info tuple for backwards compatibility pattern = r"(?P\d+).(?P\d+).(?P\d+)(?P.*)" match = re.match(pattern, __version__) if match: parts: List[Union[int, str]] = [int(match[part]) for part in ["major", "minor", "patch"]] if match["rest"]: parts.append(match["rest"]) else: parts = [] version_info = tuple(parts) protocol_version_info = (5, 3) protocol_version = "%i.%i" % protocol_version_info jupyter_client-8.6.2/jupyter_client/adapter.py000066400000000000000000000340551462351563100216360ustar00rootroot00000000000000"""Adapters for Jupyter msg spec versions.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import json import re from typing import Any, Dict, List, Tuple from ._version import protocol_version_info def code_to_line(code: str, cursor_pos: int) -> Tuple[str, int]: """Turn a multiline code block and cursor position into a single line and new cursor position. For adapting ``complete_`` and ``object_info_request``. """ if not code: return "", 0 for line in code.splitlines(True): n = len(line) if cursor_pos > n: cursor_pos -= n else: break return line, cursor_pos _match_bracket = re.compile(r"\([^\(\)]+\)", re.UNICODE) _end_bracket = re.compile(r"\([^\(]*$", re.UNICODE) _identifier = re.compile(r"[a-z_][0-9a-z._]*", re.I | re.UNICODE) def extract_oname_v4(code: str, cursor_pos: int) -> str: """Reimplement token-finding logic from IPython 2.x javascript for adapting object_info_request from v5 to v4 """ line, _ = code_to_line(code, cursor_pos) oldline = line line = _match_bracket.sub("", line) while oldline != line: oldline = line line = _match_bracket.sub("", line) # remove everything after last open bracket line = _end_bracket.sub("", line) matches = _identifier.findall(line) if matches: return matches[-1] else: return "" class Adapter: """Base class for adapting messages Override message_type(msg) methods to create adapters. """ msg_type_map: Dict[str, str] = {} def update_header(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Update the header.""" return msg def update_metadata(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Update the metadata.""" return msg def update_msg_type(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Update the message type.""" header = msg["header"] msg_type = header["msg_type"] if msg_type in self.msg_type_map: msg["msg_type"] = header["msg_type"] = self.msg_type_map[msg_type] return msg def handle_reply_status_error(self, msg: Dict[str, Any]) -> Dict[str, Any]: """This will be called *instead of* the regular handler on any reply with status != ok """ return msg def __call__(self, msg: Dict[str, Any]) -> Dict[str, Any]: msg = self.update_header(msg) msg = self.update_metadata(msg) msg = self.update_msg_type(msg) header = msg["header"] handler = getattr(self, header["msg_type"], None) if handler is None: return msg # handle status=error replies separately (no change, at present) if msg["content"].get("status", None) in {"error", "aborted"}: return self.handle_reply_status_error(msg) return handler(msg) def _version_str_to_list(version: str) -> List[int]: """convert a version string to a list of ints non-int segments are excluded """ v = [] for part in version.split("."): try: v.append(int(part)) except ValueError: pass return v class V5toV4(Adapter): """Adapt msg protocol v5 to v4""" version = "4.1" msg_type_map = { "execute_result": "pyout", "execute_input": "pyin", "error": "pyerr", "inspect_request": "object_info_request", "inspect_reply": "object_info_reply", } def update_header(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Update the header.""" msg["header"].pop("version", None) msg["parent_header"].pop("version", None) return msg # shell channel def kernel_info_reply(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a kernel info reply.""" v4c = {} content = msg["content"] for key in ("language_version", "protocol_version"): if key in content: v4c[key] = _version_str_to_list(content[key]) if content.get("implementation", "") == "ipython" and "implementation_version" in content: v4c["ipython_version"] = _version_str_to_list(content["implementation_version"]) language_info = content.get("language_info", {}) language = language_info.get("name", "") v4c.setdefault("language", language) if "version" in language_info: v4c.setdefault("language_version", _version_str_to_list(language_info["version"])) msg["content"] = v4c return msg def execute_request(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle an execute request.""" content = msg["content"] content.setdefault("user_variables", []) return msg def execute_reply(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle an execute reply.""" content = msg["content"] content.setdefault("user_variables", {}) # TODO: handle payloads return msg def complete_request(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a complete request.""" content = msg["content"] code = content["code"] cursor_pos = content["cursor_pos"] line, cursor_pos = code_to_line(code, cursor_pos) new_content = msg["content"] = {} new_content["text"] = "" new_content["line"] = line new_content["block"] = None new_content["cursor_pos"] = cursor_pos return msg def complete_reply(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a complete reply.""" content = msg["content"] cursor_start = content.pop("cursor_start") cursor_end = content.pop("cursor_end") match_len = cursor_end - cursor_start content["matched_text"] = content["matches"][0][:match_len] content.pop("metadata", None) return msg def object_info_request(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle an object info request.""" content = msg["content"] code = content["code"] cursor_pos = content["cursor_pos"] line, _ = code_to_line(code, cursor_pos) new_content = msg["content"] = {} new_content["oname"] = extract_oname_v4(code, cursor_pos) new_content["detail_level"] = content["detail_level"] return msg def object_info_reply(self, msg: Dict[str, Any]) -> Dict[str, Any]: """inspect_reply can't be easily backward compatible""" msg["content"] = {"found": False, "oname": "unknown"} return msg # iopub channel def stream(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a stream message.""" content = msg["content"] content["data"] = content.pop("text") return msg def display_data(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a display data message.""" content = msg["content"] content.setdefault("source", "display") data = content["data"] if "application/json" in data: try: data["application/json"] = json.dumps(data["application/json"]) except Exception: # warn? pass return msg # stdin channel def input_request(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle an input request.""" msg["content"].pop("password", None) return msg class V4toV5(Adapter): """Convert msg spec V4 to V5""" version = "5.0" # invert message renames above msg_type_map = {v: k for k, v in V5toV4.msg_type_map.items()} def update_header(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Update the header.""" msg["header"]["version"] = self.version if msg["parent_header"]: msg["parent_header"]["version"] = self.version return msg # shell channel def kernel_info_reply(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a kernel info reply.""" content = msg["content"] for key in ("protocol_version", "ipython_version"): if key in content: content[key] = ".".join(map(str, content[key])) content.setdefault("protocol_version", "4.1") if content["language"].startswith("python") and "ipython_version" in content: content["implementation"] = "ipython" content["implementation_version"] = content.pop("ipython_version") language = content.pop("language") language_info = content.setdefault("language_info", {}) language_info.setdefault("name", language) if "language_version" in content: language_version = ".".join(map(str, content.pop("language_version"))) language_info.setdefault("version", language_version) content["banner"] = "" return msg def execute_request(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle an execute request.""" content = msg["content"] user_variables = content.pop("user_variables", []) user_expressions = content.setdefault("user_expressions", {}) for v in user_variables: user_expressions[v] = v return msg def execute_reply(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle an execute reply.""" content = msg["content"] user_expressions = content.setdefault("user_expressions", {}) user_variables = content.pop("user_variables", {}) if user_variables: user_expressions.update(user_variables) # Pager payloads became a mime bundle for payload in content.get("payload", []): if payload.get("source", None) == "page" and ("text" in payload): if "data" not in payload: payload["data"] = {} payload["data"]["text/plain"] = payload.pop("text") return msg def complete_request(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a complete request.""" old_content = msg["content"] new_content = msg["content"] = {} new_content["code"] = old_content["line"] new_content["cursor_pos"] = old_content["cursor_pos"] return msg def complete_reply(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a complete reply.""" # complete_reply needs more context than we have to get cursor_start and end. # use special end=null to indicate current cursor position and negative offset # for start relative to the cursor. # start=None indicates that start == end (accounts for no -0). content = msg["content"] new_content = msg["content"] = {"status": "ok"} new_content["matches"] = content["matches"] if content["matched_text"]: new_content["cursor_start"] = -len(content["matched_text"]) else: # no -0, use None to indicate that start == end new_content["cursor_start"] = None new_content["cursor_end"] = None new_content["metadata"] = {} return msg def inspect_request(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle an inspect request.""" content = msg["content"] name = content["oname"] new_content = msg["content"] = {} new_content["code"] = name new_content["cursor_pos"] = len(name) new_content["detail_level"] = content["detail_level"] return msg def inspect_reply(self, msg: Dict[str, Any]) -> Dict[str, Any]: """inspect_reply can't be easily backward compatible""" content = msg["content"] new_content = msg["content"] = {"status": "ok"} found = new_content["found"] = content["found"] new_content["data"] = data = {} new_content["metadata"] = {} if found: lines = [] for key in ("call_def", "init_definition", "definition"): if content.get(key, False): lines.append(content[key]) break for key in ("call_docstring", "init_docstring", "docstring"): if content.get(key, False): lines.append(content[key]) break if not lines: lines.append("") data["text/plain"] = "\n".join(lines) return msg # iopub channel def stream(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle a stream message.""" content = msg["content"] content["text"] = content.pop("data") return msg def display_data(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle display data.""" content = msg["content"] content.pop("source", None) data = content["data"] if "application/json" in data: try: data["application/json"] = json.loads(data["application/json"]) except Exception: # warn? pass return msg # stdin channel def input_request(self, msg: Dict[str, Any]) -> Dict[str, Any]: """Handle an input request.""" msg["content"].setdefault("password", False) return msg def adapt(msg: Dict[str, Any], to_version: int = protocol_version_info[0]) -> Dict[str, Any]: """Adapt a single message to a target version Parameters ---------- msg : dict A Jupyter message. to_version : int, optional The target major version. If unspecified, adapt to the current version. Returns ------- msg : dict A Jupyter message appropriate in the new version. """ from .session import utcnow header = msg["header"] if "date" not in header: header["date"] = utcnow() if "version" in header: from_version = int(header["version"].split(".")[0]) else: # assume last version before adding the key to the header from_version = 4 adapter = adapters.get((from_version, to_version), None) if adapter is None: return msg return adapter(msg) # one adapter per major version from,to adapters = { (5, 4): V5toV4(), (4, 5): V4toV5(), } jupyter_client-8.6.2/jupyter_client/asynchronous/000077500000000000000000000000001462351563100223705ustar00rootroot00000000000000jupyter_client-8.6.2/jupyter_client/asynchronous/__init__.py000066400000000000000000000000561462351563100245020ustar00rootroot00000000000000from .client import AsyncKernelClient # noqa jupyter_client-8.6.2/jupyter_client/asynchronous/client.py000066400000000000000000000054661462351563100242330ustar00rootroot00000000000000"""Implements an async kernel client""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import typing as t import zmq.asyncio from traitlets import Instance, Type from ..channels import AsyncZMQSocketChannel, HBChannel from ..client import KernelClient, reqrep def wrapped(meth: t.Callable, channel: str) -> t.Callable: """Wrap a method on a channel and handle replies.""" def _(self: AsyncKernelClient, *args: t.Any, **kwargs: t.Any) -> t.Any: reply = kwargs.pop("reply", False) timeout = kwargs.pop("timeout", None) msg_id = meth(self, *args, **kwargs) if not reply: return msg_id return self._recv_reply(msg_id, timeout=timeout, channel=channel) return _ class AsyncKernelClient(KernelClient): """A KernelClient with async APIs ``get_[channel]_msg()`` methods wait for and return messages on channels, raising :exc:`queue.Empty` if no message arrives within ``timeout`` seconds. """ context = Instance(zmq.asyncio.Context) # type:ignore[arg-type] def _context_default(self) -> zmq.asyncio.Context: self._created_context = True return zmq.asyncio.Context() # -------------------------------------------------------------------------- # Channel proxy methods # -------------------------------------------------------------------------- get_shell_msg = KernelClient._async_get_shell_msg get_iopub_msg = KernelClient._async_get_iopub_msg get_stdin_msg = KernelClient._async_get_stdin_msg get_control_msg = KernelClient._async_get_control_msg wait_for_ready = KernelClient._async_wait_for_ready # The classes to use for the various channels shell_channel_class = Type(AsyncZMQSocketChannel) # type:ignore[arg-type] iopub_channel_class = Type(AsyncZMQSocketChannel) # type:ignore[arg-type] stdin_channel_class = Type(AsyncZMQSocketChannel) # type:ignore[arg-type] hb_channel_class = Type(HBChannel) # type:ignore[arg-type] control_channel_class = Type(AsyncZMQSocketChannel) # type:ignore[arg-type] _recv_reply = KernelClient._async_recv_reply # replies come on the shell channel execute = reqrep(wrapped, KernelClient.execute) history = reqrep(wrapped, KernelClient.history) complete = reqrep(wrapped, KernelClient.complete) is_complete = reqrep(wrapped, KernelClient.is_complete) inspect = reqrep(wrapped, KernelClient.inspect) kernel_info = reqrep(wrapped, KernelClient.kernel_info) comm_info = reqrep(wrapped, KernelClient.comm_info) is_alive = KernelClient._async_is_alive execute_interactive = KernelClient._async_execute_interactive # replies come on the control channel shutdown = reqrep(wrapped, KernelClient.shutdown, channel="control") jupyter_client-8.6.2/jupyter_client/blocking/000077500000000000000000000000001462351563100214255ustar00rootroot00000000000000jupyter_client-8.6.2/jupyter_client/blocking/__init__.py000066400000000000000000000000611462351563100235330ustar00rootroot00000000000000from .client import BlockingKernelClient # noqa jupyter_client-8.6.2/jupyter_client/blocking/client.py000066400000000000000000000052661462351563100232660ustar00rootroot00000000000000"""Implements a fully blocking kernel client. Useful for test suites and blocking terminal interfaces. """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import typing as t from traitlets import Type from ..channels import HBChannel, ZMQSocketChannel from ..client import KernelClient, reqrep from ..utils import run_sync def wrapped(meth: t.Callable, channel: str) -> t.Callable: """Wrap a method on a channel and handle replies.""" def _(self: BlockingKernelClient, *args: t.Any, **kwargs: t.Any) -> t.Any: reply = kwargs.pop("reply", False) timeout = kwargs.pop("timeout", None) msg_id = meth(self, *args, **kwargs) if not reply: return msg_id return self._recv_reply(msg_id, timeout=timeout, channel=channel) return _ class BlockingKernelClient(KernelClient): """A KernelClient with blocking APIs ``get_[channel]_msg()`` methods wait for and return messages on channels, raising :exc:`queue.Empty` if no message arrives within ``timeout`` seconds. """ # -------------------------------------------------------------------------- # Channel proxy methods # -------------------------------------------------------------------------- get_shell_msg = run_sync(KernelClient._async_get_shell_msg) get_iopub_msg = run_sync(KernelClient._async_get_iopub_msg) get_stdin_msg = run_sync(KernelClient._async_get_stdin_msg) get_control_msg = run_sync(KernelClient._async_get_control_msg) wait_for_ready = run_sync(KernelClient._async_wait_for_ready) # The classes to use for the various channels shell_channel_class = Type(ZMQSocketChannel) # type:ignore[arg-type] iopub_channel_class = Type(ZMQSocketChannel) # type:ignore[arg-type] stdin_channel_class = Type(ZMQSocketChannel) # type:ignore[arg-type] hb_channel_class = Type(HBChannel) # type:ignore[arg-type] control_channel_class = Type(ZMQSocketChannel) # type:ignore[arg-type] _recv_reply = run_sync(KernelClient._async_recv_reply) # replies come on the shell channel execute = reqrep(wrapped, KernelClient.execute) history = reqrep(wrapped, KernelClient.history) complete = reqrep(wrapped, KernelClient.complete) inspect = reqrep(wrapped, KernelClient.inspect) kernel_info = reqrep(wrapped, KernelClient.kernel_info) comm_info = reqrep(wrapped, KernelClient.comm_info) is_alive = run_sync(KernelClient._async_is_alive) execute_interactive = run_sync(KernelClient._async_execute_interactive) # replies come on the control channel shutdown = reqrep(wrapped, KernelClient.shutdown, channel="control") jupyter_client-8.6.2/jupyter_client/channels.py000066400000000000000000000251231462351563100220050ustar00rootroot00000000000000"""Base classes to manage a Client's interaction with a running kernel""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import asyncio import atexit import time import typing as t from queue import Empty from threading import Event, Thread import zmq.asyncio from jupyter_core.utils import ensure_async from ._version import protocol_version_info from .channelsabc import HBChannelABC from .session import Session # import ZMQError in top-level namespace, to avoid ugly attribute-error messages # during garbage collection of threads at exit # ----------------------------------------------------------------------------- # Constants and exceptions # ----------------------------------------------------------------------------- major_protocol_version = protocol_version_info[0] class InvalidPortNumber(Exception): # noqa """An exception raised for an invalid port number.""" pass class HBChannel(Thread): """The heartbeat channel which monitors the kernel heartbeat. Note that the heartbeat channel is paused by default. As long as you start this channel, the kernel manager will ensure that it is paused and un-paused as appropriate. """ session = None socket = None address = None _exiting = False time_to_dead: float = 1.0 _running = None _pause = None _beating = None def __init__( self, context: t.Optional[zmq.Context] = None, session: t.Optional[Session] = None, address: t.Union[t.Tuple[str, int], str] = "", ) -> None: """Create the heartbeat monitor thread. Parameters ---------- context : :class:`zmq.Context` The ZMQ context to use. session : :class:`session.Session` The session to use. address : zmq url Standard (ip, port) tuple that the kernel is listening on. """ super().__init__() self.daemon = True self.context = context self.session = session if isinstance(address, tuple): if address[1] == 0: message = "The port number for a channel cannot be 0." raise InvalidPortNumber(message) address_str = "tcp://%s:%i" % address else: address_str = address self.address = address_str # running is False until `.start()` is called self._running = False self._exit = Event() # don't start paused self._pause = False self.poller = zmq.Poller() @staticmethod @atexit.register def _notice_exit() -> None: # Class definitions can be torn down during interpreter shutdown. # We only need to set _exiting flag if this hasn't happened. if HBChannel is not None: HBChannel._exiting = True def _create_socket(self) -> None: if self.socket is not None: # close previous socket, before opening a new one self.poller.unregister(self.socket) # type:ignore[unreachable] self.socket.close() assert self.context is not None self.socket = self.context.socket(zmq.REQ) self.socket.linger = 1000 assert self.address is not None self.socket.connect(self.address) self.poller.register(self.socket, zmq.POLLIN) async def _async_run(self) -> None: """The thread's main activity. Call start() instead.""" self._create_socket() self._running = True self._beating = True assert self.socket is not None while self._running: if self._pause: # just sleep, and skip the rest of the loop self._exit.wait(self.time_to_dead) continue since_last_heartbeat = 0.0 # no need to catch EFSM here, because the previous event was # either a recv or connect, which cannot be followed by EFSM) await ensure_async(self.socket.send(b"ping")) request_time = time.time() # Wait until timeout self._exit.wait(self.time_to_dead) # poll(0) means return immediately (see http://api.zeromq.org/2-1:zmq-poll) self._beating = bool(self.poller.poll(0)) if self._beating: # the poll above guarantees we have something to recv await ensure_async(self.socket.recv()) continue elif self._running: # nothing was received within the time limit, signal heart failure since_last_heartbeat = time.time() - request_time self.call_handlers(since_last_heartbeat) # and close/reopen the socket, because the REQ/REP cycle has been broken self._create_socket() continue def run(self) -> None: """Run the heartbeat thread.""" loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop.run_until_complete(self._async_run()) loop.close() def pause(self) -> None: """Pause the heartbeat.""" self._pause = True def unpause(self) -> None: """Unpause the heartbeat.""" self._pause = False def is_beating(self) -> bool: """Is the heartbeat running and responsive (and not paused).""" if self.is_alive() and not self._pause and self._beating: # noqa return True else: return False def stop(self) -> None: """Stop the channel's event loop and join its thread.""" self._running = False self._exit.set() self.join() self.close() def close(self) -> None: """Close the heartbeat thread.""" if self.socket is not None: try: self.socket.close(linger=0) except Exception: pass self.socket = None def call_handlers(self, since_last_heartbeat: float) -> None: """This method is called in the ioloop thread when a message arrives. Subclasses should override this method to handle incoming messages. It is important to remember that this method is called in the thread so that some logic must be done to ensure that the application level handlers are called in the application thread. """ pass HBChannelABC.register(HBChannel) class ZMQSocketChannel: """A ZMQ socket wrapper""" def __init__(self, socket: zmq.Socket, session: Session, loop: t.Any = None) -> None: """Create a channel. Parameters ---------- socket : :class:`zmq.Socket` The ZMQ socket to use. session : :class:`session.Session` The session to use. loop Unused here, for other implementations """ super().__init__() self.socket: t.Optional[zmq.Socket] = socket self.session = session def _recv(self, **kwargs: t.Any) -> t.Dict[str, t.Any]: assert self.socket is not None msg = self.socket.recv_multipart(**kwargs) ident, smsg = self.session.feed_identities(msg) return self.session.deserialize(smsg) def get_msg(self, timeout: t.Optional[float] = None) -> t.Dict[str, t.Any]: """Gets a message if there is one that is ready.""" assert self.socket is not None timeout_ms = None if timeout is None else int(timeout * 1000) # seconds to ms ready = self.socket.poll(timeout_ms) if ready: res = self._recv() return res else: raise Empty def get_msgs(self) -> t.List[t.Dict[str, t.Any]]: """Get all messages that are currently ready.""" msgs = [] while True: try: msgs.append(self.get_msg()) except Empty: break return msgs def msg_ready(self) -> bool: """Is there a message that has been received?""" assert self.socket is not None return bool(self.socket.poll(timeout=0)) def close(self) -> None: """Close the socket channel.""" if self.socket is not None: try: self.socket.close(linger=0) except Exception: pass self.socket = None stop = close def is_alive(self) -> bool: """Test whether the channel is alive.""" return self.socket is not None def send(self, msg: t.Dict[str, t.Any]) -> None: """Pass a message to the ZMQ socket to send""" assert self.socket is not None self.session.send(self.socket, msg) def start(self) -> None: """Start the socket channel.""" pass class AsyncZMQSocketChannel(ZMQSocketChannel): """A ZMQ socket in an async API""" socket: zmq.asyncio.Socket def __init__(self, socket: zmq.asyncio.Socket, session: Session, loop: t.Any = None) -> None: """Create a channel. Parameters ---------- socket : :class:`zmq.asyncio.Socket` The ZMQ socket to use. session : :class:`session.Session` The session to use. loop Unused here, for other implementations """ if not isinstance(socket, zmq.asyncio.Socket): msg = "Socket must be asyncio" # type:ignore[unreachable] raise ValueError(msg) super().__init__(socket, session) async def _recv(self, **kwargs: t.Any) -> t.Dict[str, t.Any]: # type:ignore[override] assert self.socket is not None msg = await self.socket.recv_multipart(**kwargs) _, smsg = self.session.feed_identities(msg) return self.session.deserialize(smsg) async def get_msg( # type:ignore[override] self, timeout: t.Optional[float] = None ) -> t.Dict[str, t.Any]: """Gets a message if there is one that is ready.""" assert self.socket is not None timeout_ms = None if timeout is None else int(timeout * 1000) # seconds to ms ready = await self.socket.poll(timeout_ms) if ready: res = await self._recv() return res else: raise Empty async def get_msgs(self) -> t.List[t.Dict[str, t.Any]]: # type:ignore[override] """Get all messages that are currently ready.""" msgs = [] while True: try: msgs.append(await self.get_msg()) except Empty: break return msgs async def msg_ready(self) -> bool: # type:ignore[override] """Is there a message that has been received?""" assert self.socket is not None return bool(await self.socket.poll(timeout=0)) jupyter_client-8.6.2/jupyter_client/channelsabc.py000066400000000000000000000022311462351563100224460ustar00rootroot00000000000000"""Abstract base classes for kernel client channels""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import abc class ChannelABC(metaclass=abc.ABCMeta): """A base class for all channel ABCs.""" @abc.abstractmethod def start(self) -> None: """Start the channel.""" pass @abc.abstractmethod def stop(self) -> None: """Stop the channel.""" pass @abc.abstractmethod def is_alive(self) -> bool: """Test whether the channel is alive.""" pass class HBChannelABC(ChannelABC): """HBChannel ABC. The docstrings for this class can be found in the base implementation: `jupyter_client.channels.HBChannel` """ @abc.abstractproperty def time_to_dead(self) -> float: pass @abc.abstractmethod def pause(self) -> None: """Pause the heartbeat channel.""" pass @abc.abstractmethod def unpause(self) -> None: """Unpause the heartbeat channel.""" pass @abc.abstractmethod def is_beating(self) -> bool: """Test whether the channel is beating.""" pass jupyter_client-8.6.2/jupyter_client/client.py000066400000000000000000000742001462351563100214700ustar00rootroot00000000000000"""Base class to manage the interaction with a running kernel""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import asyncio import inspect import sys import time import typing as t from functools import partial from getpass import getpass from queue import Empty import zmq.asyncio from jupyter_core.utils import ensure_async from traitlets import Any, Bool, Instance, Type from .channels import major_protocol_version from .channelsabc import ChannelABC, HBChannelABC from .clientabc import KernelClientABC from .connect import ConnectionFileMixin from .session import Session # some utilities to validate message structure, these might get moved elsewhere # if they prove to have more generic utility def validate_string_dict(dct: t.Dict[str, str]) -> None: """Validate that the input is a dict with string keys and values. Raises ValueError if not.""" for k, v in dct.items(): if not isinstance(k, str): raise ValueError("key %r in dict must be a string" % k) if not isinstance(v, str): raise ValueError("value %r in dict must be a string" % v) def reqrep(wrapped: t.Callable, meth: t.Callable, channel: str = "shell") -> t.Callable: wrapped = wrapped(meth, channel) if not meth.__doc__: # python -OO removes docstrings, # so don't bother building the wrapped docstring return wrapped basedoc, _ = meth.__doc__.split("Returns\n", 1) parts = [basedoc.strip()] if "Parameters" not in basedoc: parts.append( """ Parameters ---------- """ ) parts.append( """ reply: bool (default: False) Whether to wait for and return reply timeout: float or None (default: None) Timeout to use when waiting for a reply Returns ------- msg_id: str The msg_id of the request sent, if reply=False (default) reply: dict The reply message for this request, if reply=True """ ) wrapped.__doc__ = "\n".join(parts) return wrapped class KernelClient(ConnectionFileMixin): """Communicates with a single kernel on any host via zmq channels. There are five channels associated with each kernel: * shell: for request/reply calls to the kernel. * iopub: for the kernel to publish results to frontends. * hb: for monitoring the kernel's heartbeat. * stdin: for frontends to reply to raw_input calls in the kernel. * control: for kernel management calls to the kernel. The messages that can be sent on these channels are exposed as methods of the client (KernelClient.execute, complete, history, etc.). These methods only send the message, they don't wait for a reply. To get results, use e.g. :meth:`get_shell_msg` to fetch messages from the shell channel. """ # The PyZMQ Context to use for communication with the kernel. context = Instance(zmq.Context) _created_context = Bool(False) def _context_default(self) -> zmq.Context: self._created_context = True return zmq.Context() # The classes to use for the various channels shell_channel_class = Type(ChannelABC) iopub_channel_class = Type(ChannelABC) stdin_channel_class = Type(ChannelABC) hb_channel_class = Type(HBChannelABC) control_channel_class = Type(ChannelABC) # Protected traits _shell_channel = Any() _iopub_channel = Any() _stdin_channel = Any() _hb_channel = Any() _control_channel = Any() # flag for whether execute requests should be allowed to call raw_input: allow_stdin: bool = True def __del__(self) -> None: """Handle garbage collection. Destroy context if applicable.""" if ( self._created_context and self.context is not None # type:ignore[redundant-expr] and not self.context.closed ): if self.channels_running: if self.log: self.log.warning("Could not destroy zmq context for %s", self) else: if self.log: self.log.debug("Destroying zmq context for %s", self) self.context.destroy() try: super_del = super().__del__ # type:ignore[misc] except AttributeError: pass else: super_del() # -------------------------------------------------------------------------- # Channel proxy methods # -------------------------------------------------------------------------- async def _async_get_shell_msg(self, *args: t.Any, **kwargs: t.Any) -> t.Dict[str, t.Any]: """Get a message from the shell channel""" return await ensure_async(self.shell_channel.get_msg(*args, **kwargs)) async def _async_get_iopub_msg(self, *args: t.Any, **kwargs: t.Any) -> t.Dict[str, t.Any]: """Get a message from the iopub channel""" return await ensure_async(self.iopub_channel.get_msg(*args, **kwargs)) async def _async_get_stdin_msg(self, *args: t.Any, **kwargs: t.Any) -> t.Dict[str, t.Any]: """Get a message from the stdin channel""" return await ensure_async(self.stdin_channel.get_msg(*args, **kwargs)) async def _async_get_control_msg(self, *args: t.Any, **kwargs: t.Any) -> t.Dict[str, t.Any]: """Get a message from the control channel""" return await ensure_async(self.control_channel.get_msg(*args, **kwargs)) async def _async_wait_for_ready(self, timeout: t.Optional[float] = None) -> None: """Waits for a response when a client is blocked - Sets future time for timeout - Blocks on shell channel until a message is received - Exit if the kernel has died - If client times out before receiving a message from the kernel, send RuntimeError - Flush the IOPub channel """ if timeout is None: timeout = float("inf") abs_timeout = time.time() + timeout from .manager import KernelManager if not isinstance(self.parent, KernelManager): # This Client was not created by a KernelManager, # so wait for kernel to become responsive to heartbeats # before checking for kernel_info reply while not await self._async_is_alive(): if time.time() > abs_timeout: raise RuntimeError( "Kernel didn't respond to heartbeats in %d seconds and timed out" % timeout ) await asyncio.sleep(0.2) # Wait for kernel info reply on shell channel while True: self.kernel_info() try: msg = await ensure_async(self.shell_channel.get_msg(timeout=1)) except Empty: pass else: if msg["msg_type"] == "kernel_info_reply": # Checking that IOPub is connected. If it is not connected, start over. try: await ensure_async(self.iopub_channel.get_msg(timeout=0.2)) except Empty: pass else: self._handle_kernel_info_reply(msg) break if not await self._async_is_alive(): msg = "Kernel died before replying to kernel_info" raise RuntimeError(msg) # Check if current time is ready check time plus timeout if time.time() > abs_timeout: raise RuntimeError("Kernel didn't respond in %d seconds" % timeout) # Flush IOPub channel while True: try: msg = await ensure_async(self.iopub_channel.get_msg(timeout=0.2)) except Empty: break async def _async_recv_reply( self, msg_id: str, timeout: t.Optional[float] = None, channel: str = "shell" ) -> t.Dict[str, t.Any]: """Receive and return the reply for a given request""" if timeout is not None: deadline = time.monotonic() + timeout while True: if timeout is not None: timeout = max(0, deadline - time.monotonic()) try: if channel == "control": reply = await self._async_get_control_msg(timeout=timeout) else: reply = await self._async_get_shell_msg(timeout=timeout) except Empty as e: msg = "Timeout waiting for reply" raise TimeoutError(msg) from e if reply["parent_header"].get("msg_id") != msg_id: # not my reply, someone may have forgotten to retrieve theirs continue return reply async def _stdin_hook_default(self, msg: t.Dict[str, t.Any]) -> None: """Handle an input request""" content = msg["content"] prompt = getpass if content.get("password", False) else input try: raw_data = prompt(content["prompt"]) # type:ignore[operator] except EOFError: # turn EOFError into EOF character raw_data = "\x04" except KeyboardInterrupt: sys.stdout.write("\n") return # only send stdin reply if there *was not* another request # or execution finished while we were reading. if not (await self.stdin_channel.msg_ready() or await self.shell_channel.msg_ready()): self.input(raw_data) def _output_hook_default(self, msg: t.Dict[str, t.Any]) -> None: """Default hook for redisplaying plain-text output""" msg_type = msg["header"]["msg_type"] content = msg["content"] if msg_type == "stream": stream = getattr(sys, content["name"]) stream.write(content["text"]) elif msg_type in ("display_data", "execute_result"): sys.stdout.write(content["data"].get("text/plain", "")) elif msg_type == "error": sys.stderr.write("\n".join(content["traceback"])) def _output_hook_kernel( self, session: Session, socket: zmq.sugar.socket.Socket, parent_header: t.Any, msg: t.Dict[str, t.Any], ) -> None: """Output hook when running inside an IPython kernel adds rich output support. """ msg_type = msg["header"]["msg_type"] if msg_type in ("display_data", "execute_result", "error"): session.send(socket, msg_type, msg["content"], parent=parent_header) else: self._output_hook_default(msg) # -------------------------------------------------------------------------- # Channel management methods # -------------------------------------------------------------------------- def start_channels( self, shell: bool = True, iopub: bool = True, stdin: bool = True, hb: bool = True, control: bool = True, ) -> None: """Starts the channels for this kernel. This will create the channels if they do not exist and then start them (their activity runs in a thread). If port numbers of 0 are being used (random ports) then you must first call :meth:`start_kernel`. If the channels have been stopped and you call this, :class:`RuntimeError` will be raised. """ if iopub: self.iopub_channel.start() if shell: self.shell_channel.start() if stdin: self.stdin_channel.start() self.allow_stdin = True else: self.allow_stdin = False if hb: self.hb_channel.start() if control: self.control_channel.start() def stop_channels(self) -> None: """Stops all the running channels for this kernel. This stops their event loops and joins their threads. """ if self.shell_channel.is_alive(): self.shell_channel.stop() if self.iopub_channel.is_alive(): self.iopub_channel.stop() if self.stdin_channel.is_alive(): self.stdin_channel.stop() if self.hb_channel.is_alive(): self.hb_channel.stop() if self.control_channel.is_alive(): self.control_channel.stop() @property def channels_running(self) -> bool: """Are any of the channels created and running?""" return ( (self._shell_channel and self.shell_channel.is_alive()) or (self._iopub_channel and self.iopub_channel.is_alive()) or (self._stdin_channel and self.stdin_channel.is_alive()) or (self._hb_channel and self.hb_channel.is_alive()) or (self._control_channel and self.control_channel.is_alive()) ) ioloop = None # Overridden in subclasses that use pyzmq event loop @property def shell_channel(self) -> t.Any: """Get the shell channel object for this kernel.""" if self._shell_channel is None: url = self._make_url("shell") self.log.debug("connecting shell channel to %s", url) socket = self.connect_shell(identity=self.session.bsession) self._shell_channel = self.shell_channel_class( # type:ignore[call-arg,abstract] socket, self.session, self.ioloop ) return self._shell_channel @property def iopub_channel(self) -> t.Any: """Get the iopub channel object for this kernel.""" if self._iopub_channel is None: url = self._make_url("iopub") self.log.debug("connecting iopub channel to %s", url) socket = self.connect_iopub() self._iopub_channel = self.iopub_channel_class( # type:ignore[call-arg,abstract] socket, self.session, self.ioloop ) return self._iopub_channel @property def stdin_channel(self) -> t.Any: """Get the stdin channel object for this kernel.""" if self._stdin_channel is None: url = self._make_url("stdin") self.log.debug("connecting stdin channel to %s", url) socket = self.connect_stdin(identity=self.session.bsession) self._stdin_channel = self.stdin_channel_class( # type:ignore[call-arg,abstract] socket, self.session, self.ioloop ) return self._stdin_channel @property def hb_channel(self) -> t.Any: """Get the hb channel object for this kernel.""" if self._hb_channel is None: url = self._make_url("hb") self.log.debug("connecting heartbeat channel to %s", url) self._hb_channel = self.hb_channel_class( # type:ignore[call-arg,abstract] self.context, self.session, url ) return self._hb_channel @property def control_channel(self) -> t.Any: """Get the control channel object for this kernel.""" if self._control_channel is None: url = self._make_url("control") self.log.debug("connecting control channel to %s", url) socket = self.connect_control(identity=self.session.bsession) self._control_channel = self.control_channel_class( # type:ignore[call-arg,abstract] socket, self.session, self.ioloop ) return self._control_channel async def _async_is_alive(self) -> bool: """Is the kernel process still running?""" from .manager import KernelManager if isinstance(self.parent, KernelManager): # This KernelClient was created by a KernelManager, # we can ask the parent KernelManager: return await self.parent._async_is_alive() if self._hb_channel is not None: # We don't have access to the KernelManager, # so we use the heartbeat. return self._hb_channel.is_beating() # no heartbeat and not local, we can't tell if it's running, # so naively return True return True async def _async_execute_interactive( self, code: str, silent: bool = False, store_history: bool = True, user_expressions: t.Optional[t.Dict[str, t.Any]] = None, allow_stdin: t.Optional[bool] = None, stop_on_error: bool = True, timeout: t.Optional[float] = None, output_hook: t.Optional[t.Callable] = None, stdin_hook: t.Optional[t.Callable] = None, ) -> t.Dict[str, t.Any]: """Execute code in the kernel interactively Output will be redisplayed, and stdin prompts will be relayed as well. If an IPython kernel is detected, rich output will be displayed. You can pass a custom output_hook callable that will be called with every IOPub message that is produced instead of the default redisplay. .. versionadded:: 5.0 Parameters ---------- code : str A string of code in the kernel's language. silent : bool, optional (default False) If set, the kernel will execute the code as quietly possible, and will force store_history to be False. store_history : bool, optional (default True) If set, the kernel will store command history. This is forced to be False if silent is True. user_expressions : dict, optional A dict mapping names to expressions to be evaluated in the user's dict. The expression values are returned as strings formatted using :func:`repr`. allow_stdin : bool, optional (default self.allow_stdin) Flag for whether the kernel can send stdin requests to frontends. Some frontends (e.g. the Notebook) do not support stdin requests. If raw_input is called from code executed from such a frontend, a StdinNotImplementedError will be raised. stop_on_error: bool, optional (default True) Flag whether to abort the execution queue, if an exception is encountered. timeout: float or None (default: None) Timeout to use when waiting for a reply output_hook: callable(msg) Function to be called with output messages. If not specified, output will be redisplayed. stdin_hook: callable(msg) Function or awaitable to be called with stdin_request messages. If not specified, input/getpass will be called. Returns ------- reply: dict The reply message for this request """ if not self.iopub_channel.is_alive(): emsg = "IOPub channel must be running to receive output" raise RuntimeError(emsg) if allow_stdin is None: allow_stdin = self.allow_stdin if allow_stdin and not self.stdin_channel.is_alive(): emsg = "stdin channel must be running to allow input" raise RuntimeError(emsg) msg_id = await ensure_async( self.execute( code, silent=silent, store_history=store_history, user_expressions=user_expressions, allow_stdin=allow_stdin, stop_on_error=stop_on_error, ) ) if stdin_hook is None: stdin_hook = self._stdin_hook_default # detect IPython kernel if output_hook is None and "IPython" in sys.modules: from IPython import get_ipython ip = get_ipython() # type:ignore[no-untyped-call] in_kernel = getattr(ip, "kernel", False) if in_kernel: output_hook = partial( self._output_hook_kernel, ip.display_pub.session, ip.display_pub.pub_socket, ip.display_pub.parent_header, ) if output_hook is None: # default: redisplay plain-text outputs output_hook = self._output_hook_default # set deadline based on timeout if timeout is not None: deadline = time.monotonic() + timeout else: timeout_ms = None poller = zmq.asyncio.Poller() iopub_socket = self.iopub_channel.socket poller.register(iopub_socket, zmq.POLLIN) if allow_stdin: stdin_socket = self.stdin_channel.socket poller.register(stdin_socket, zmq.POLLIN) else: stdin_socket = None # wait for output and redisplay it while True: if timeout is not None: timeout = max(0, deadline - time.monotonic()) timeout_ms = int(1000 * timeout) events = dict(await poller.poll(timeout_ms)) if not events: emsg = "Timeout waiting for output" raise TimeoutError(emsg) if stdin_socket in events: req = await ensure_async(self.stdin_channel.get_msg(timeout=0)) res = stdin_hook(req) if inspect.isawaitable(res): await res continue if iopub_socket not in events: continue msg = await ensure_async(self.iopub_channel.get_msg(timeout=0)) if msg["parent_header"].get("msg_id") != msg_id: # not from my request continue output_hook(msg) # stop on idle if ( msg["header"]["msg_type"] == "status" and msg["content"]["execution_state"] == "idle" ): break # output is done, get the reply if timeout is not None: timeout = max(0, deadline - time.monotonic()) return await self._async_recv_reply(msg_id, timeout=timeout) # Methods to send specific messages on channels def execute( self, code: str, silent: bool = False, store_history: bool = True, user_expressions: t.Optional[t.Dict[str, t.Any]] = None, allow_stdin: t.Optional[bool] = None, stop_on_error: bool = True, ) -> str: """Execute code in the kernel. Parameters ---------- code : str A string of code in the kernel's language. silent : bool, optional (default False) If set, the kernel will execute the code as quietly possible, and will force store_history to be False. store_history : bool, optional (default True) If set, the kernel will store command history. This is forced to be False if silent is True. user_expressions : dict, optional A dict mapping names to expressions to be evaluated in the user's dict. The expression values are returned as strings formatted using :func:`repr`. allow_stdin : bool, optional (default self.allow_stdin) Flag for whether the kernel can send stdin requests to frontends. Some frontends (e.g. the Notebook) do not support stdin requests. If raw_input is called from code executed from such a frontend, a StdinNotImplementedError will be raised. stop_on_error: bool, optional (default True) Flag whether to abort the execution queue, if an exception is encountered. Returns ------- The msg_id of the message sent. """ if user_expressions is None: user_expressions = {} if allow_stdin is None: allow_stdin = self.allow_stdin # Don't waste network traffic if inputs are invalid if not isinstance(code, str): raise ValueError("code %r must be a string" % code) validate_string_dict(user_expressions) # Create class for content/msg creation. Related to, but possibly # not in Session. content = { "code": code, "silent": silent, "store_history": store_history, "user_expressions": user_expressions, "allow_stdin": allow_stdin, "stop_on_error": stop_on_error, } msg = self.session.msg("execute_request", content) self.shell_channel.send(msg) return msg["header"]["msg_id"] def complete(self, code: str, cursor_pos: t.Optional[int] = None) -> str: """Tab complete text in the kernel's namespace. Parameters ---------- code : str The context in which completion is requested. Can be anything between a variable name and an entire cell. cursor_pos : int, optional The position of the cursor in the block of code where the completion was requested. Default: ``len(code)`` Returns ------- The msg_id of the message sent. """ if cursor_pos is None: cursor_pos = len(code) content = {"code": code, "cursor_pos": cursor_pos} msg = self.session.msg("complete_request", content) self.shell_channel.send(msg) return msg["header"]["msg_id"] def inspect(self, code: str, cursor_pos: t.Optional[int] = None, detail_level: int = 0) -> str: """Get metadata information about an object in the kernel's namespace. It is up to the kernel to determine the appropriate object to inspect. Parameters ---------- code : str The context in which info is requested. Can be anything between a variable name and an entire cell. cursor_pos : int, optional The position of the cursor in the block of code where the info was requested. Default: ``len(code)`` detail_level : int, optional The level of detail for the introspection (0-2) Returns ------- The msg_id of the message sent. """ if cursor_pos is None: cursor_pos = len(code) content = { "code": code, "cursor_pos": cursor_pos, "detail_level": detail_level, } msg = self.session.msg("inspect_request", content) self.shell_channel.send(msg) return msg["header"]["msg_id"] def history( self, raw: bool = True, output: bool = False, hist_access_type: str = "range", **kwargs: t.Any, ) -> str: """Get entries from the kernel's history list. Parameters ---------- raw : bool If True, return the raw input. output : bool If True, then return the output as well. hist_access_type : str 'range' (fill in session, start and stop params), 'tail' (fill in n) or 'search' (fill in pattern param). session : int For a range request, the session from which to get lines. Session numbers are positive integers; negative ones count back from the current session. start : int The first line number of a history range. stop : int The final (excluded) line number of a history range. n : int The number of lines of history to get for a tail request. pattern : str The glob-syntax pattern for a search request. Returns ------- The ID of the message sent. """ if hist_access_type == "range": kwargs.setdefault("session", 0) kwargs.setdefault("start", 0) content = dict(raw=raw, output=output, hist_access_type=hist_access_type, **kwargs) msg = self.session.msg("history_request", content) self.shell_channel.send(msg) return msg["header"]["msg_id"] def kernel_info(self) -> str: """Request kernel info Returns ------- The msg_id of the message sent """ msg = self.session.msg("kernel_info_request") self.shell_channel.send(msg) return msg["header"]["msg_id"] def comm_info(self, target_name: t.Optional[str] = None) -> str: """Request comm info Returns ------- The msg_id of the message sent """ content = {} if target_name is None else {"target_name": target_name} msg = self.session.msg("comm_info_request", content) self.shell_channel.send(msg) return msg["header"]["msg_id"] def _handle_kernel_info_reply(self, msg: t.Dict[str, t.Any]) -> None: """handle kernel info reply sets protocol adaptation version. This might be run from a separate thread. """ adapt_version = int(msg["content"]["protocol_version"].split(".")[0]) if adapt_version != major_protocol_version: self.session.adapt_version = adapt_version def is_complete(self, code: str) -> str: """Ask the kernel whether some code is complete and ready to execute. Returns ------- The ID of the message sent. """ msg = self.session.msg("is_complete_request", {"code": code}) self.shell_channel.send(msg) return msg["header"]["msg_id"] def input(self, string: str) -> None: """Send a string of raw input to the kernel. This should only be called in response to the kernel sending an ``input_request`` message on the stdin channel. Returns ------- The ID of the message sent. """ content = {"value": string} msg = self.session.msg("input_reply", content) self.stdin_channel.send(msg) def shutdown(self, restart: bool = False) -> str: """Request an immediate kernel shutdown on the control channel. Upon receipt of the (empty) reply, client code can safely assume that the kernel has shut down and it's safe to forcefully terminate it if it's still alive. The kernel will send the reply via a function registered with Python's atexit module, ensuring it's truly done as the kernel is done with all normal operation. Returns ------- The msg_id of the message sent """ # Send quit message to kernel. Once we implement kernel-side setattr, # this should probably be done that way, but for now this will do. msg = self.session.msg("shutdown_request", {"restart": restart}) self.control_channel.send(msg) return msg["header"]["msg_id"] KernelClientABC.register(KernelClient) jupyter_client-8.6.2/jupyter_client/clientabc.py000066400000000000000000000053301462351563100221340ustar00rootroot00000000000000"""Abstract base class for kernel clients""" # ----------------------------------------------------------------------------- # Copyright (c) The Jupyter Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- from __future__ import annotations import abc from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from .channelsabc import ChannelABC # ----------------------------------------------------------------------------- # Main kernel client class # ----------------------------------------------------------------------------- class KernelClientABC(metaclass=abc.ABCMeta): """KernelManager ABC. The docstrings for this class can be found in the base implementation: `jupyter_client.client.KernelClient` """ @abc.abstractproperty def kernel(self) -> Any: pass @abc.abstractproperty def shell_channel_class(self) -> type[ChannelABC]: pass @abc.abstractproperty def iopub_channel_class(self) -> type[ChannelABC]: pass @abc.abstractproperty def hb_channel_class(self) -> type[ChannelABC]: pass @abc.abstractproperty def stdin_channel_class(self) -> type[ChannelABC]: pass @abc.abstractproperty def control_channel_class(self) -> type[ChannelABC]: pass # -------------------------------------------------------------------------- # Channel management methods # -------------------------------------------------------------------------- @abc.abstractmethod def start_channels( self, shell: bool = True, iopub: bool = True, stdin: bool = True, hb: bool = True, control: bool = True, ) -> None: """Start the channels for the client.""" pass @abc.abstractmethod def stop_channels(self) -> None: """Stop the channels for the client.""" pass @abc.abstractproperty def channels_running(self) -> bool: """Get whether the channels are running.""" pass @abc.abstractproperty def shell_channel(self) -> ChannelABC: pass @abc.abstractproperty def iopub_channel(self) -> ChannelABC: pass @abc.abstractproperty def stdin_channel(self) -> ChannelABC: pass @abc.abstractproperty def hb_channel(self) -> ChannelABC: pass @abc.abstractproperty def control_channel(self) -> ChannelABC: pass jupyter_client-8.6.2/jupyter_client/connect.py000066400000000000000000000613741462351563100216530ustar00rootroot00000000000000"""Utilities for connecting to jupyter kernels The :class:`ConnectionFileMixin` class in this module encapsulates the logic related to writing and reading connections files. """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import errno import glob import json import os import socket import stat import tempfile import warnings from getpass import getpass from typing import TYPE_CHECKING, Any, Dict, Union, cast import zmq from jupyter_core.paths import jupyter_data_dir, jupyter_runtime_dir, secure_write from traitlets import Bool, CaselessStrEnum, Instance, Integer, Type, Unicode, observe from traitlets.config import LoggingConfigurable, SingletonConfigurable from .localinterfaces import localhost from .utils import _filefind if TYPE_CHECKING: from jupyter_client import BlockingKernelClient from .session import Session # Define custom type for kernel connection info KernelConnectionInfo = Dict[str, Union[int, str, bytes]] def write_connection_file( fname: str | None = None, shell_port: int = 0, iopub_port: int = 0, stdin_port: int = 0, hb_port: int = 0, control_port: int = 0, ip: str = "", key: bytes = b"", transport: str = "tcp", signature_scheme: str = "hmac-sha256", kernel_name: str = "", **kwargs: Any, ) -> tuple[str, KernelConnectionInfo]: """Generates a JSON config file, including the selection of random ports. Parameters ---------- fname : unicode The path to the file to write shell_port : int, optional The port to use for ROUTER (shell) channel. iopub_port : int, optional The port to use for the SUB channel. stdin_port : int, optional The port to use for the ROUTER (raw input) channel. control_port : int, optional The port to use for the ROUTER (control) channel. hb_port : int, optional The port to use for the heartbeat REP channel. ip : str, optional The ip address the kernel will bind to. key : str, optional The Session key used for message authentication. signature_scheme : str, optional The scheme used for message authentication. This has the form 'digest-hash', where 'digest' is the scheme used for digests, and 'hash' is the name of the hash function used by the digest scheme. Currently, 'hmac' is the only supported digest scheme, and 'sha256' is the default hash function. kernel_name : str, optional The name of the kernel currently connected to. """ if not ip: ip = localhost() # default to temporary connector file if not fname: fd, fname = tempfile.mkstemp(".json") os.close(fd) # Find open ports as necessary. ports: list[int] = [] sockets: list[socket.socket] = [] ports_needed = ( int(shell_port <= 0) + int(iopub_port <= 0) + int(stdin_port <= 0) + int(control_port <= 0) + int(hb_port <= 0) ) if transport == "tcp": for _ in range(ports_needed): sock = socket.socket() # struct.pack('ii', (0,0)) is 8 null bytes sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b"\0" * 8) sock.bind((ip, 0)) sockets.append(sock) for sock in sockets: port = sock.getsockname()[1] sock.close() ports.append(port) else: N = 1 for _ in range(ports_needed): while os.path.exists(f"{ip}-{N!s}"): N += 1 ports.append(N) N += 1 if shell_port <= 0: shell_port = ports.pop(0) if iopub_port <= 0: iopub_port = ports.pop(0) if stdin_port <= 0: stdin_port = ports.pop(0) if control_port <= 0: control_port = ports.pop(0) if hb_port <= 0: hb_port = ports.pop(0) cfg: KernelConnectionInfo = { "shell_port": shell_port, "iopub_port": iopub_port, "stdin_port": stdin_port, "control_port": control_port, "hb_port": hb_port, } cfg["ip"] = ip cfg["key"] = key.decode() cfg["transport"] = transport cfg["signature_scheme"] = signature_scheme cfg["kernel_name"] = kernel_name cfg.update(kwargs) # Only ever write this file as user read/writeable # This would otherwise introduce a vulnerability as a file has secrets # which would let others execute arbitrary code as you with secure_write(fname) as f: f.write(json.dumps(cfg, indent=2)) if hasattr(stat, "S_ISVTX"): # set the sticky bit on the parent directory of the file # to ensure only owner can remove it runtime_dir = os.path.dirname(fname) if runtime_dir: permissions = os.stat(runtime_dir).st_mode new_permissions = permissions | stat.S_ISVTX if new_permissions != permissions: try: os.chmod(runtime_dir, new_permissions) except OSError as e: if e.errno == errno.EPERM: # suppress permission errors setting sticky bit on runtime_dir, # which we may not own. pass return fname, cfg def find_connection_file( filename: str = "kernel-*.json", path: str | list[str] | None = None, profile: str | None = None, ) -> str: """find a connection file, and return its absolute path. The current working directory and optional search path will be searched for the file if it is not given by absolute path. If the argument does not match an existing file, it will be interpreted as a fileglob, and the matching file in the profile's security dir with the latest access time will be used. Parameters ---------- filename : str The connection file or fileglob to search for. path : str or list of strs[optional] Paths in which to search for connection files. Returns ------- str : The absolute path of the connection file. """ if profile is not None: warnings.warn( "Jupyter has no profiles. profile=%s has been ignored." % profile, stacklevel=2 ) if path is None: path = [".", jupyter_runtime_dir()] if isinstance(path, str): path = [path] try: # first, try explicit name return _filefind(filename, path) except OSError: pass # not found by full name if "*" in filename: # given as a glob already pat = filename else: # accept any substring match pat = "*%s*" % filename matches = [] for p in path: matches.extend(glob.glob(os.path.join(p, pat))) matches = [os.path.abspath(m) for m in matches] if not matches: msg = f"Could not find {filename!r} in {path!r}" raise OSError(msg) elif len(matches) == 1: return matches[0] else: # get most recent match, by access time: return sorted(matches, key=lambda f: os.stat(f).st_atime)[-1] def tunnel_to_kernel( connection_info: str | KernelConnectionInfo, sshserver: str, sshkey: str | None = None, ) -> tuple[Any, ...]: """tunnel connections to a kernel via ssh This will open five SSH tunnels from localhost on this machine to the ports associated with the kernel. They can be either direct localhost-localhost tunnels, or if an intermediate server is necessary, the kernel must be listening on a public IP. Parameters ---------- connection_info : dict or str (path) Either a connection dict, or the path to a JSON connection file sshserver : str The ssh sever to use to tunnel to the kernel. Can be a full `user@server:port` string. ssh config aliases are respected. sshkey : str [optional] Path to file containing ssh key to use for authentication. Only necessary if your ssh config does not already associate a keyfile with the host. Returns ------- (shell, iopub, stdin, hb, control) : ints The five ports on localhost that have been forwarded to the kernel. """ from .ssh import tunnel if isinstance(connection_info, str): # it's a path, unpack it with open(connection_info) as f: connection_info = json.loads(f.read()) cf = cast(Dict[str, Any], connection_info) lports = tunnel.select_random_ports(5) rports = ( cf["shell_port"], cf["iopub_port"], cf["stdin_port"], cf["hb_port"], cf["control_port"], ) remote_ip = cf["ip"] if tunnel.try_passwordless_ssh(sshserver, sshkey): password: bool | str = False else: password = getpass("SSH Password for %s: " % sshserver) for lp, rp in zip(lports, rports): tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password) return tuple(lports) # ----------------------------------------------------------------------------- # Mixin for classes that work with connection files # ----------------------------------------------------------------------------- channel_socket_types = { "hb": zmq.REQ, "shell": zmq.DEALER, "iopub": zmq.SUB, "stdin": zmq.DEALER, "control": zmq.DEALER, } port_names = ["%s_port" % channel for channel in ("shell", "stdin", "iopub", "hb", "control")] class ConnectionFileMixin(LoggingConfigurable): """Mixin for configurable classes that work with connection files""" data_dir: str | Unicode = Unicode() def _data_dir_default(self) -> str: return jupyter_data_dir() # The addresses for the communication channels connection_file = Unicode( "", config=True, help="""JSON file in which to store connection info [default: kernel-.json] This file will contain the IP, ports, and authentication key needed to connect clients to this kernel. By default, this file will be created in the security dir of the current profile, but can be specified by absolute path. """, ) _connection_file_written = Bool(False) transport = CaselessStrEnum(["tcp", "ipc"], default_value="tcp", config=True) kernel_name: str | Unicode = Unicode() context = Instance(zmq.Context) ip = Unicode( config=True, help="""Set the kernel\'s IP address [default localhost]. If the IP address is something other than localhost, then Consoles on other machines will be able to connect to the Kernel, so be careful!""", ) def _ip_default(self) -> str: if self.transport == "ipc": if self.connection_file: return os.path.splitext(self.connection_file)[0] + "-ipc" else: return "kernel-ipc" else: return localhost() @observe("ip") def _ip_changed(self, change: Any) -> None: if change["new"] == "*": self.ip = "0.0.0.0" # noqa # protected traits hb_port = Integer(0, config=True, help="set the heartbeat port [default: random]") shell_port = Integer(0, config=True, help="set the shell (ROUTER) port [default: random]") iopub_port = Integer(0, config=True, help="set the iopub (PUB) port [default: random]") stdin_port = Integer(0, config=True, help="set the stdin (ROUTER) port [default: random]") control_port = Integer(0, config=True, help="set the control (ROUTER) port [default: random]") # names of the ports with random assignment _random_port_names: list[str] | None = None @property def ports(self) -> list[int]: return [getattr(self, name) for name in port_names] # The Session to use for communication with the kernel. session = Instance("jupyter_client.session.Session") def _session_default(self) -> Session: from .session import Session return Session(parent=self) # -------------------------------------------------------------------------- # Connection and ipc file management # -------------------------------------------------------------------------- def get_connection_info(self, session: bool = False) -> KernelConnectionInfo: """Return the connection info as a dict Parameters ---------- session : bool [default: False] If True, return our session object will be included in the connection info. If False (default), the configuration parameters of our session object will be included, rather than the session object itself. Returns ------- connect_info : dict dictionary of connection information. """ info = { "transport": self.transport, "ip": self.ip, "shell_port": self.shell_port, "iopub_port": self.iopub_port, "stdin_port": self.stdin_port, "hb_port": self.hb_port, "control_port": self.control_port, } if session: # add *clone* of my session, # so that state such as digest_history is not shared. info["session"] = self.session.clone() else: # add session info info.update( { "signature_scheme": self.session.signature_scheme, "key": self.session.key, } ) return info # factory for blocking clients blocking_class = Type(klass=object, default_value="jupyter_client.BlockingKernelClient") def blocking_client(self) -> BlockingKernelClient: """Make a blocking client connected to my kernel""" info = self.get_connection_info() bc = self.blocking_class(parent=self) # type:ignore[operator] bc.load_connection_info(info) return bc def cleanup_connection_file(self) -> None: """Cleanup connection file *if we wrote it* Will not raise if the connection file was already removed somehow. """ if self._connection_file_written: # cleanup connection files on full shutdown of kernel we started self._connection_file_written = False try: os.remove(self.connection_file) except (OSError, AttributeError): pass def cleanup_ipc_files(self) -> None: """Cleanup ipc files if we wrote them.""" if self.transport != "ipc": return for port in self.ports: ipcfile = "%s-%i" % (self.ip, port) try: os.remove(ipcfile) except OSError: pass def _record_random_port_names(self) -> None: """Records which of the ports are randomly assigned. Records on first invocation, if the transport is tcp. Does nothing on later invocations.""" if self.transport != "tcp": return if self._random_port_names is not None: return self._random_port_names = [] for name in port_names: if getattr(self, name) <= 0: self._random_port_names.append(name) def cleanup_random_ports(self) -> None: """Forgets randomly assigned port numbers and cleans up the connection file. Does nothing if no port numbers have been randomly assigned. In particular, does nothing unless the transport is tcp. """ if not self._random_port_names: return for name in self._random_port_names: setattr(self, name, 0) self.cleanup_connection_file() def write_connection_file(self, **kwargs: Any) -> None: """Write connection info to JSON dict in self.connection_file.""" if self._connection_file_written and os.path.exists(self.connection_file): return self.connection_file, cfg = write_connection_file( self.connection_file, transport=self.transport, ip=self.ip, key=self.session.key, stdin_port=self.stdin_port, iopub_port=self.iopub_port, shell_port=self.shell_port, hb_port=self.hb_port, control_port=self.control_port, signature_scheme=self.session.signature_scheme, kernel_name=self.kernel_name, **kwargs, ) # write_connection_file also sets default ports: self._record_random_port_names() for name in port_names: setattr(self, name, cfg[name]) self._connection_file_written = True def load_connection_file(self, connection_file: str | None = None) -> None: """Load connection info from JSON dict in self.connection_file. Parameters ---------- connection_file: unicode, optional Path to connection file to load. If unspecified, use self.connection_file """ if connection_file is None: connection_file = self.connection_file self.log.debug("Loading connection file %s", connection_file) with open(connection_file) as f: info = json.load(f) self.load_connection_info(info) def load_connection_info(self, info: KernelConnectionInfo) -> None: """Load connection info from a dict containing connection info. Typically this data comes from a connection file and is called by load_connection_file. Parameters ---------- info: dict Dictionary containing connection_info. See the connection_file spec for details. """ self.transport = info.get("transport", self.transport) self.ip = info.get("ip", self._ip_default()) # type:ignore[assignment] self._record_random_port_names() for name in port_names: if getattr(self, name) == 0 and name in info: # not overridden by config or cl_args setattr(self, name, info[name]) if "key" in info: key = info["key"] if isinstance(key, str): key = key.encode() assert isinstance(key, bytes) self.session.key = key if "signature_scheme" in info: self.session.signature_scheme = info["signature_scheme"] def _reconcile_connection_info(self, info: KernelConnectionInfo) -> None: """Reconciles the connection information returned from the Provisioner. Because some provisioners (like derivations of LocalProvisioner) may have already written the connection file, this method needs to ensure that, if the connection file exists, its contents match that of what was returned by the provisioner. If the file does exist and its contents do not match, the file will be replaced with the provisioner information (which is considered the truth). If the file does not exist, the connection information in 'info' is loaded into the KernelManager and written to the file. """ # Prevent over-writing a file that has already been written with the same # info. This is to prevent a race condition where the process has # already been launched but has not yet read the connection file - as is # the case with LocalProvisioners. file_exists: bool = False if os.path.exists(self.connection_file): with open(self.connection_file) as f: file_info = json.load(f) # Prior to the following comparison, we need to adjust the value of "key" to # be bytes, otherwise the comparison below will fail. file_info["key"] = file_info["key"].encode() if not self._equal_connections(info, file_info): os.remove(self.connection_file) # Contents mismatch - remove the file self._connection_file_written = False else: file_exists = True if not file_exists: # Load the connection info and write out file, clearing existing # port-based attributes so they will be reloaded for name in port_names: setattr(self, name, 0) self.load_connection_info(info) self.write_connection_file() # Ensure what is in KernelManager is what we expect. km_info = self.get_connection_info() if not self._equal_connections(info, km_info): msg = ( "KernelManager's connection information already exists and does not match " "the expected values returned from provisioner!" ) raise ValueError(msg) @staticmethod def _equal_connections(conn1: KernelConnectionInfo, conn2: KernelConnectionInfo) -> bool: """Compares pertinent keys of connection info data. Returns True if equivalent, False otherwise.""" pertinent_keys = [ "key", "ip", "stdin_port", "iopub_port", "shell_port", "control_port", "hb_port", "transport", "signature_scheme", ] return all(conn1.get(key) == conn2.get(key) for key in pertinent_keys) # -------------------------------------------------------------------------- # Creating connected sockets # -------------------------------------------------------------------------- def _make_url(self, channel: str) -> str: """Make a ZeroMQ URL for a given channel.""" transport = self.transport ip = self.ip port = getattr(self, "%s_port" % channel) if transport == "tcp": return "tcp://%s:%i" % (ip, port) else: return f"{transport}://{ip}-{port}" def _create_connected_socket( self, channel: str, identity: bytes | None = None ) -> zmq.sugar.socket.Socket: """Create a zmq Socket and connect it to the kernel.""" url = self._make_url(channel) socket_type = channel_socket_types[channel] self.log.debug("Connecting to: %s", url) sock = self.context.socket(socket_type) # set linger to 1s to prevent hangs at exit sock.linger = 1000 if identity: sock.identity = identity sock.connect(url) return sock def connect_iopub(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket: """return zmq Socket connected to the IOPub channel""" sock = self._create_connected_socket("iopub", identity=identity) sock.setsockopt(zmq.SUBSCRIBE, b"") return sock def connect_shell(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket: """return zmq Socket connected to the Shell channel""" return self._create_connected_socket("shell", identity=identity) def connect_stdin(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket: """return zmq Socket connected to the StdIn channel""" return self._create_connected_socket("stdin", identity=identity) def connect_hb(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket: """return zmq Socket connected to the Heartbeat channel""" return self._create_connected_socket("hb", identity=identity) def connect_control(self, identity: bytes | None = None) -> zmq.sugar.socket.Socket: """return zmq Socket connected to the Control channel""" return self._create_connected_socket("control", identity=identity) class LocalPortCache(SingletonConfigurable): """ Used to keep track of local ports in order to prevent race conditions that can occur between port acquisition and usage by the kernel. All locally- provisioned kernels should use this mechanism to limit the possibility of race conditions. Note that this does not preclude other applications from acquiring a cached but unused port, thereby re-introducing the issue this class is attempting to resolve (minimize). See: https://github.com/jupyter/jupyter_client/issues/487 """ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) self.currently_used_ports: set[int] = set() def find_available_port(self, ip: str) -> int: while True: tmp_sock = socket.socket() tmp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b"\0" * 8) tmp_sock.bind((ip, 0)) port = tmp_sock.getsockname()[1] tmp_sock.close() # This is a workaround for https://github.com/jupyter/jupyter_client/issues/487 # We prevent two kernels to have the same ports. if port not in self.currently_used_ports: self.currently_used_ports.add(port) return port def return_port(self, port: int) -> None: if port in self.currently_used_ports: # Tolerate uncached ports self.currently_used_ports.remove(port) __all__ = [ "write_connection_file", "find_connection_file", "tunnel_to_kernel", "KernelConnectionInfo", "LocalPortCache", ] jupyter_client-8.6.2/jupyter_client/consoleapp.py000066400000000000000000000331311462351563100223530ustar00rootroot00000000000000""" A minimal application base mixin for all ZMQ based IPython frontends. This is not a complete console app, as subprocess will not be able to receive input, there is no real readline support, among other limitations. This is a refactoring of what used to be the IPython/qt/console/qtconsoleapp.py """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import atexit import os import signal import sys import typing as t import uuid import warnings from jupyter_core.application import base_aliases, base_flags from traitlets import CBool, CUnicode, Dict, List, Type, Unicode from traitlets.config.application import boolean_flag from . import KernelManager, connect, find_connection_file, tunnel_to_kernel from .blocking import BlockingKernelClient from .connect import KernelConnectionInfo from .kernelspec import NoSuchKernel from .localinterfaces import localhost from .restarter import KernelRestarter from .session import Session from .utils import _filefind ConnectionFileMixin = connect.ConnectionFileMixin # ----------------------------------------------------------------------------- # Aliases and Flags # ----------------------------------------------------------------------------- flags: dict = {} flags.update(base_flags) # the flags that are specific to the frontend # these must be scrubbed before being passed to the kernel, # or it will raise an error on unrecognized flags app_flags: dict = { "existing": ( {"JupyterConsoleApp": {"existing": "kernel*.json"}}, "Connect to an existing kernel. If no argument specified, guess most recent", ), } app_flags.update( boolean_flag( "confirm-exit", "JupyterConsoleApp.confirm_exit", """Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', to force a direct exit without any confirmation. This can also be set in the config file by setting `c.JupyterConsoleApp.confirm_exit`. """, """Don't prompt the user when exiting. This will terminate the kernel if it is owned by the frontend, and leave it alive if it is external. This can also be set in the config file by setting `c.JupyterConsoleApp.confirm_exit`. """, ) ) flags.update(app_flags) aliases: dict = {} aliases.update(base_aliases) # also scrub aliases from the frontend app_aliases: dict = { "ip": "JupyterConsoleApp.ip", "transport": "JupyterConsoleApp.transport", "hb": "JupyterConsoleApp.hb_port", "shell": "JupyterConsoleApp.shell_port", "iopub": "JupyterConsoleApp.iopub_port", "stdin": "JupyterConsoleApp.stdin_port", "control": "JupyterConsoleApp.control_port", "existing": "JupyterConsoleApp.existing", "f": "JupyterConsoleApp.connection_file", "kernel": "JupyterConsoleApp.kernel_name", "ssh": "JupyterConsoleApp.sshserver", "sshkey": "JupyterConsoleApp.sshkey", } aliases.update(app_aliases) # ----------------------------------------------------------------------------- # Classes # ----------------------------------------------------------------------------- classes: t.List[t.Type[t.Any]] = [KernelManager, KernelRestarter, Session] class JupyterConsoleApp(ConnectionFileMixin): """The base Jupyter console application.""" name: t.Union[str, Unicode] = "jupyter-console-mixin" description: t.Union[str, Unicode] = """ The Jupyter Console Mixin. This class contains the common portions of console client (QtConsole, ZMQ-based terminal console, etc). It is not a full console, in that launched terminal subprocesses will not be able to accept input. The Console using this mixing supports various extra features beyond the single-process Terminal IPython shell, such as connecting to existing kernel, via: jupyter console --existing as well as tunnel via SSH """ classes = classes flags = Dict(flags) aliases = Dict(aliases) kernel_manager_class = Type( default_value=KernelManager, config=True, help="The kernel manager class to use.", ) kernel_client_class = BlockingKernelClient kernel_argv = List(Unicode()) # connection info: sshserver = Unicode("", config=True, help="""The SSH server to use to connect to the kernel.""") sshkey = Unicode( "", config=True, help="""Path to the ssh key to use for logging in to the ssh server.""", ) def _connection_file_default(self) -> str: return "kernel-%i.json" % os.getpid() existing = CUnicode("", config=True, help="""Connect to an already running kernel""") kernel_name = Unicode( "python", config=True, help="""The name of the default kernel to start.""" ) confirm_exit = CBool( True, config=True, help=""" Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', to force a direct exit without any confirmation.""", ) def build_kernel_argv(self, argv: object = None) -> None: """build argv to be passed to kernel subprocess Override in subclasses if any args should be passed to the kernel """ self.kernel_argv = self.extra_args # type:ignore[attr-defined] def init_connection_file(self) -> None: """find the connection file, and load the info if found. The current working directory and the current profile's security directory will be searched for the file if it is not given by absolute path. When attempting to connect to an existing kernel and the `--existing` argument does not match an existing file, it will be interpreted as a fileglob, and the matching file in the current profile's security dir with the latest access time will be used. After this method is called, self.connection_file contains the *full path* to the connection file, never just its name. """ runtime_dir = self.runtime_dir # type:ignore[attr-defined] if self.existing: try: cf = find_connection_file(self.existing, [".", runtime_dir]) except Exception: self.log.critical( "Could not find existing kernel connection file %s", self.existing ) self.exit(1) # type:ignore[attr-defined] self.log.debug("Connecting to existing kernel: %s", cf) self.connection_file = cf else: # not existing, check if we are going to write the file # and ensure that self.connection_file is a full path, not just the shortname try: cf = find_connection_file(self.connection_file, [runtime_dir]) except Exception: # file might not exist if self.connection_file == os.path.basename(self.connection_file): # just shortname, put it in security dir cf = os.path.join(runtime_dir, self.connection_file) else: cf = self.connection_file self.connection_file = cf try: self.connection_file = _filefind(self.connection_file, [".", runtime_dir]) except OSError: self.log.debug("Connection File not found: %s", self.connection_file) return # should load_connection_file only be used for existing? # as it is now, this allows reusing ports if an existing # file is requested try: self.load_connection_file() except Exception: self.log.error( "Failed to load connection file: %r", self.connection_file, exc_info=True, ) self.exit(1) # type:ignore[attr-defined] def init_ssh(self) -> None: """set up ssh tunnels, if needed.""" if not self.existing or (not self.sshserver and not self.sshkey): return self.load_connection_file() transport = self.transport ip = self.ip if transport != "tcp": self.log.error("Can only use ssh tunnels with TCP sockets, not %s", transport) sys.exit(-1) if self.sshkey and not self.sshserver: # specifying just the key implies that we are connecting directly self.sshserver = ip ip = localhost() # build connection dict for tunnels: info: KernelConnectionInfo = { "ip": ip, "shell_port": self.shell_port, "iopub_port": self.iopub_port, "stdin_port": self.stdin_port, "hb_port": self.hb_port, "control_port": self.control_port, } self.log.info("Forwarding connections to %s via %s", ip, self.sshserver) # tunnels return a new set of ports, which will be on localhost: self.ip = localhost() try: newports = tunnel_to_kernel(info, self.sshserver, self.sshkey) except: # noqa # even catch KeyboardInterrupt self.log.error("Could not setup tunnels", exc_info=True) self.exit(1) # type:ignore[attr-defined] ( self.shell_port, self.iopub_port, self.stdin_port, self.hb_port, self.control_port, ) = newports cf = self.connection_file root, ext = os.path.splitext(cf) self.connection_file = root + "-ssh" + ext self.write_connection_file() # write the new connection file self.log.info("To connect another client via this tunnel, use:") self.log.info("--existing %s", os.path.basename(self.connection_file)) def _new_connection_file(self) -> str: cf = "" while not cf: # we don't need a 128b id to distinguish kernels, use more readable # 48b node segment (12 hex chars). Users running more than 32k simultaneous # kernels can subclass. ident = str(uuid.uuid4()).split("-")[-1] runtime_dir = self.runtime_dir # type:ignore[attr-defined] cf = os.path.join(runtime_dir, "kernel-%s.json" % ident) # only keep if it's actually new. Protect against unlikely collision # in 48b random search space cf = cf if not os.path.exists(cf) else "" return cf def init_kernel_manager(self) -> None: """Initialize the kernel manager.""" # Don't let Qt or ZMQ swallow KeyboardInterupts. if self.existing: self.kernel_manager = None return signal.signal(signal.SIGINT, signal.SIG_DFL) # Create a KernelManager and start a kernel. try: self.kernel_manager = self.kernel_manager_class( ip=self.ip, session=self.session, transport=self.transport, shell_port=self.shell_port, iopub_port=self.iopub_port, stdin_port=self.stdin_port, hb_port=self.hb_port, control_port=self.control_port, connection_file=self.connection_file, kernel_name=self.kernel_name, parent=self, data_dir=self.data_dir, ) except NoSuchKernel: self.log.critical("Could not find kernel %s", self.kernel_name) self.exit(1) # type:ignore[attr-defined] self.kernel_manager = t.cast(KernelManager, self.kernel_manager) self.kernel_manager.client_factory = self.kernel_client_class kwargs = {} kwargs["extra_arguments"] = self.kernel_argv self.kernel_manager.start_kernel(**kwargs) atexit.register(self.kernel_manager.cleanup_ipc_files) if self.sshserver: # ssh, write new connection file self.kernel_manager.write_connection_file() # in case KM defaults / ssh writing changes things: km = self.kernel_manager self.shell_port = km.shell_port self.iopub_port = km.iopub_port self.stdin_port = km.stdin_port self.hb_port = km.hb_port self.control_port = km.control_port self.connection_file = km.connection_file atexit.register(self.kernel_manager.cleanup_connection_file) def init_kernel_client(self) -> None: """Initialize the kernel client.""" if self.kernel_manager is not None: self.kernel_client = self.kernel_manager.client() else: self.kernel_client = self.kernel_client_class( session=self.session, ip=self.ip, transport=self.transport, shell_port=self.shell_port, iopub_port=self.iopub_port, stdin_port=self.stdin_port, hb_port=self.hb_port, control_port=self.control_port, connection_file=self.connection_file, parent=self, ) self.kernel_client.start_channels() def initialize(self, argv: object = None) -> None: """ Classes which mix this class in should call: JupyterConsoleApp.initialize(self,argv) """ if getattr(self, "_dispatching", False): return self.init_connection_file() self.init_ssh() self.init_kernel_manager() self.init_kernel_client() class IPythonConsoleApp(JupyterConsoleApp): """An app to manage an ipython console.""" def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: """Initialize the app.""" warnings.warn("IPythonConsoleApp is deprecated. Use JupyterConsoleApp", stacklevel=2) super().__init__(*args, **kwargs) jupyter_client-8.6.2/jupyter_client/ioloop/000077500000000000000000000000001462351563100211365ustar00rootroot00000000000000jupyter_client-8.6.2/jupyter_client/ioloop/__init__.py000066400000000000000000000003261462351563100232500ustar00rootroot00000000000000from .manager import AsyncIOLoopKernelManager # noqa from .manager import IOLoopKernelManager # noqa from .restarter import AsyncIOLoopKernelRestarter # noqa from .restarter import IOLoopKernelRestarter # noqa jupyter_client-8.6.2/jupyter_client/ioloop/manager.py000066400000000000000000000101021462351563100231140ustar00rootroot00000000000000"""A kernel manager with a tornado IOLoop""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import typing as t import zmq from tornado import ioloop from traitlets import Instance, Type from zmq.eventloop.zmqstream import ZMQStream from ..manager import AsyncKernelManager, KernelManager from .restarter import AsyncIOLoopKernelRestarter, IOLoopKernelRestarter def as_zmqstream(f: t.Any) -> t.Callable: """Convert a socket to a zmq stream.""" def wrapped(self: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Any: save_socket_class = None # zmqstreams only support sync sockets if self.context._socket_class is not zmq.Socket: save_socket_class = self.context._socket_class self.context._socket_class = zmq.Socket try: socket = f(self, *args, **kwargs) finally: if save_socket_class: # restore default socket class self.context._socket_class = save_socket_class return ZMQStream(socket, self.loop) return wrapped class IOLoopKernelManager(KernelManager): """An io loop kernel manager.""" loop = Instance("tornado.ioloop.IOLoop") def _loop_default(self) -> ioloop.IOLoop: return ioloop.IOLoop.current() restarter_class = Type( default_value=IOLoopKernelRestarter, klass=IOLoopKernelRestarter, help=( "Type of KernelRestarter to use. " "Must be a subclass of IOLoopKernelRestarter.\n" "Override this to customize how kernel restarts are managed." ), config=True, ) _restarter: t.Any = Instance("jupyter_client.ioloop.IOLoopKernelRestarter", allow_none=True) def start_restarter(self) -> None: """Start the restarter.""" if self.autorestart and self.has_kernel: if self._restarter is None: self._restarter = self.restarter_class( kernel_manager=self, loop=self.loop, parent=self, log=self.log ) self._restarter.start() def stop_restarter(self) -> None: """Stop the restarter.""" if self.autorestart and self._restarter is not None: self._restarter.stop() connect_shell = as_zmqstream(KernelManager.connect_shell) connect_control = as_zmqstream(KernelManager.connect_control) connect_iopub = as_zmqstream(KernelManager.connect_iopub) connect_stdin = as_zmqstream(KernelManager.connect_stdin) connect_hb = as_zmqstream(KernelManager.connect_hb) class AsyncIOLoopKernelManager(AsyncKernelManager): """An async ioloop kernel manager.""" loop = Instance("tornado.ioloop.IOLoop") def _loop_default(self) -> ioloop.IOLoop: return ioloop.IOLoop.current() restarter_class = Type( default_value=AsyncIOLoopKernelRestarter, klass=AsyncIOLoopKernelRestarter, help=( "Type of KernelRestarter to use. " "Must be a subclass of AsyncIOLoopKernelManager.\n" "Override this to customize how kernel restarts are managed." ), config=True, ) _restarter: t.Any = Instance( "jupyter_client.ioloop.AsyncIOLoopKernelRestarter", allow_none=True ) def start_restarter(self) -> None: """Start the restarter.""" if self.autorestart and self.has_kernel: if self._restarter is None: self._restarter = self.restarter_class( kernel_manager=self, loop=self.loop, parent=self, log=self.log ) self._restarter.start() def stop_restarter(self) -> None: """Stop the restarter.""" if self.autorestart and self._restarter is not None: self._restarter.stop() connect_shell = as_zmqstream(AsyncKernelManager.connect_shell) connect_control = as_zmqstream(AsyncKernelManager.connect_control) connect_iopub = as_zmqstream(AsyncKernelManager.connect_iopub) connect_stdin = as_zmqstream(AsyncKernelManager.connect_stdin) connect_hb = as_zmqstream(AsyncKernelManager.connect_hb) jupyter_client-8.6.2/jupyter_client/ioloop/restarter.py000066400000000000000000000075021462351563100235270ustar00rootroot00000000000000"""A basic in process kernel monitor with autorestarting. This watches a kernel's state using KernelManager.is_alive and auto restarts the kernel if it dies. """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import time import warnings from typing import Any from traitlets import Instance from ..restarter import KernelRestarter class IOLoopKernelRestarter(KernelRestarter): """Monitor and autorestart a kernel.""" loop = Instance("tornado.ioloop.IOLoop") def _loop_default(self) -> Any: warnings.warn( "IOLoopKernelRestarter.loop is deprecated in jupyter-client 5.2", DeprecationWarning, stacklevel=4, ) from tornado import ioloop return ioloop.IOLoop.current() _pcallback = None def start(self) -> None: """Start the polling of the kernel.""" if self._pcallback is None: from tornado.ioloop import PeriodicCallback self._pcallback = PeriodicCallback( self.poll, 1000 * self.time_to_dead, ) self._pcallback.start() def stop(self) -> None: """Stop the kernel polling.""" if self._pcallback is not None: self._pcallback.stop() self._pcallback = None class AsyncIOLoopKernelRestarter(IOLoopKernelRestarter): """An async io loop kernel restarter.""" async def poll(self) -> None: # type:ignore[override] """Poll the kernel.""" if self.debug: self.log.debug("Polling kernel...") is_alive = await self.kernel_manager.is_alive() now = time.time() if not is_alive: self._last_dead = now if self._restarting: self._restart_count += 1 else: self._restart_count = 1 if self._restart_count > self.restart_limit: self.log.warning("AsyncIOLoopKernelRestarter: restart failed") self._fire_callbacks("dead") self._restarting = False self._restart_count = 0 self.stop() else: newports = self.random_ports_until_alive and self._initial_startup self.log.info( "AsyncIOLoopKernelRestarter: restarting kernel (%i/%i), %s random ports", self._restart_count, self.restart_limit, "new" if newports else "keep", ) self._fire_callbacks("restart") await self.kernel_manager.restart_kernel(now=True, newports=newports) self._restarting = True else: # Since `is_alive` only tests that the kernel process is alive, it does not # indicate that the kernel has successfully completed startup. To solve this # correctly, we would need to wait for a kernel info reply, but it is not # necessarily appropriate to start a kernel client + channels in the # restarter. Therefore, we use "has been alive continuously for X time" as a # heuristic for a stable start up. # See https://github.com/jupyter/jupyter_client/pull/717 for details. stable_start_time = self.stable_start_time if self.kernel_manager.provisioner: stable_start_time = self.kernel_manager.provisioner.get_stable_start_time( recommended=stable_start_time ) if self._initial_startup and now - self._last_dead >= stable_start_time: self._initial_startup = False if self._restarting and now - self._last_dead >= stable_start_time: self.log.debug("AsyncIOLoopKernelRestarter: restart apparently succeeded") self._restarting = False jupyter_client-8.6.2/jupyter_client/jsonutil.py000066400000000000000000000136051462351563100220630ustar00rootroot00000000000000"""Utilities to manipulate JSON objects.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import math import numbers import re import types import warnings from binascii import b2a_base64 from collections.abc import Iterable from datetime import datetime from typing import Any, Optional, Union from dateutil.parser import parse as _dateutil_parse from dateutil.tz import tzlocal next_attr_name = "__next__" # Not sure what downstream library uses this, but left it to be safe # ----------------------------------------------------------------------------- # Globals and constants # ----------------------------------------------------------------------------- # timestamp formats ISO8601 = "%Y-%m-%dT%H:%M:%S.%f" ISO8601_PAT = re.compile( r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?(Z|([\+\-]\d{2}:?\d{2}))?$" ) # holy crap, strptime is not threadsafe. # Calling it once at import seems to help. datetime.strptime("1", "%d") # noqa # ----------------------------------------------------------------------------- # Classes and functions # ----------------------------------------------------------------------------- def _ensure_tzinfo(dt: datetime) -> datetime: """Ensure a datetime object has tzinfo If no tzinfo is present, add tzlocal """ if not dt.tzinfo: # No more naïve datetime objects! warnings.warn( "Interpreting naive datetime as local %s. Please add timezone info to timestamps." % dt, DeprecationWarning, stacklevel=4, ) dt = dt.replace(tzinfo=tzlocal()) return dt def parse_date(s: Optional[str]) -> Optional[Union[str, datetime]]: """parse an ISO8601 date string If it is None or not a valid ISO8601 timestamp, it will be returned unmodified. Otherwise, it will return a datetime object. """ if s is None: return s m = ISO8601_PAT.match(s) if m: dt = _dateutil_parse(s) return _ensure_tzinfo(dt) return s def extract_dates(obj: Any) -> Any: """extract ISO8601 dates from unpacked JSON""" if isinstance(obj, dict): new_obj = {} # don't clobber for k, v in obj.items(): new_obj[k] = extract_dates(v) obj = new_obj elif isinstance(obj, (list, tuple)): obj = [extract_dates(o) for o in obj] elif isinstance(obj, str): obj = parse_date(obj) return obj def squash_dates(obj: Any) -> Any: """squash datetime objects into ISO8601 strings""" if isinstance(obj, dict): obj = dict(obj) # don't clobber for k, v in obj.items(): obj[k] = squash_dates(v) elif isinstance(obj, (list, tuple)): obj = [squash_dates(o) for o in obj] elif isinstance(obj, datetime): obj = obj.isoformat() return obj def date_default(obj: Any) -> Any: """DEPRECATED: Use jupyter_client.jsonutil.json_default""" warnings.warn( "date_default is deprecated since jupyter_client 7.0.0." " Use jupyter_client.jsonutil.json_default.", stacklevel=2, ) return json_default(obj) def json_default(obj: Any) -> Any: """default function for packing objects in JSON.""" if isinstance(obj, datetime): obj = _ensure_tzinfo(obj) return obj.isoformat().replace("+00:00", "Z") if isinstance(obj, bytes): return b2a_base64(obj, newline=False).decode("ascii") if isinstance(obj, Iterable): return list(obj) if isinstance(obj, numbers.Integral): return int(obj) if isinstance(obj, numbers.Real): return float(obj) raise TypeError("%r is not JSON serializable" % obj) # Copy of the old ipykernel's json_clean # This is temporary, it should be removed when we deprecate support for # non-valid JSON messages def json_clean(obj: Any) -> Any: # types that are 'atomic' and ok in json as-is. atomic_ok = (str, type(None)) # containers that we need to convert into lists container_to_list = (tuple, set, types.GeneratorType) # Since bools are a subtype of Integrals, which are a subtype of Reals, # we have to check them in that order. if isinstance(obj, bool): return obj if isinstance(obj, numbers.Integral): # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598) return int(obj) if isinstance(obj, numbers.Real): # cast out-of-range floats to their reprs if math.isnan(obj) or math.isinf(obj): return repr(obj) return float(obj) if isinstance(obj, atomic_ok): return obj if isinstance(obj, bytes): # unanmbiguous binary data is base64-encoded # (this probably should have happened upstream) return b2a_base64(obj, newline=False).decode("ascii") if isinstance(obj, container_to_list) or ( hasattr(obj, "__iter__") and hasattr(obj, next_attr_name) ): obj = list(obj) if isinstance(obj, list): return [json_clean(x) for x in obj] if isinstance(obj, dict): # First, validate that the dict won't lose data in conversion due to # key collisions after stringification. This can happen with keys like # True and 'true' or 1 and '1', which collide in JSON. nkeys = len(obj) nkeys_collapsed = len(set(map(str, obj))) if nkeys != nkeys_collapsed: msg = ( "dict cannot be safely converted to JSON: " "key collision would lead to dropped values" ) raise ValueError(msg) # If all OK, proceed by making the new dict that will be json-safe out = {} for k, v in obj.items(): out[str(k)] = json_clean(v) return out if isinstance(obj, datetime): return obj.strftime(ISO8601) # we don't understand it, it's probably an unserializable object raise ValueError("Can't clean for JSON: %r" % obj) jupyter_client-8.6.2/jupyter_client/kernelapp.py000066400000000000000000000055751462351563100222040ustar00rootroot00000000000000"""An application to launch a kernel by name in a local subprocess.""" import os import signal import typing as t import uuid from jupyter_core.application import JupyterApp, base_flags from tornado.ioloop import IOLoop from traitlets import Unicode from . import __version__ from .kernelspec import NATIVE_KERNEL_NAME, KernelSpecManager from .manager import KernelManager class KernelApp(JupyterApp): """Launch a kernel by name in a local subprocess.""" version = __version__ description = "Run a kernel locally in a subprocess" classes = [KernelManager, KernelSpecManager] aliases = { "kernel": "KernelApp.kernel_name", "ip": "KernelManager.ip", } flags = {"debug": base_flags["debug"]} kernel_name = Unicode(NATIVE_KERNEL_NAME, help="The name of a kernel type to start").tag( config=True ) def initialize(self, argv: t.Union[str, t.Sequence[str], None] = None) -> None: """Initialize the application.""" super().initialize(argv) cf_basename = "kernel-%s.json" % uuid.uuid4() self.config.setdefault("KernelManager", {}).setdefault( "connection_file", os.path.join(self.runtime_dir, cf_basename) ) self.km = KernelManager(kernel_name=self.kernel_name, config=self.config) self.loop = IOLoop.current() self.loop.add_callback(self._record_started) def setup_signals(self) -> None: """Shutdown on SIGTERM or SIGINT (Ctrl-C)""" if os.name == "nt": return def shutdown_handler(signo: int, frame: t.Any) -> None: self.loop.add_callback_from_signal(self.shutdown, signo) for sig in [signal.SIGTERM, signal.SIGINT]: signal.signal(sig, shutdown_handler) def shutdown(self, signo: int) -> None: """Shut down the application.""" self.log.info("Shutting down on signal %d", signo) self.km.shutdown_kernel() self.loop.stop() def log_connection_info(self) -> None: """Log the connection info for the kernel.""" cf = self.km.connection_file self.log.info("Connection file: %s", cf) self.log.info("To connect a client: --existing %s", os.path.basename(cf)) def _record_started(self) -> None: """For tests, create a file to indicate that we've started Do not rely on this except in our own tests! """ fn = os.environ.get("JUPYTER_CLIENT_TEST_RECORD_STARTUP_PRIVATE") if fn is not None: with open(fn, "wb"): pass def start(self) -> None: """Start the application.""" self.log.info("Starting kernel %r", self.kernel_name) try: self.km.start_kernel() self.log_connection_info() self.setup_signals() self.loop.start() finally: self.km.cleanup_resources() main = KernelApp.launch_instance jupyter_client-8.6.2/jupyter_client/kernelspec.py000066400000000000000000000364571462351563100223610ustar00rootroot00000000000000"""Tools for managing kernel specs""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import json import os import re import shutil import typing as t import warnings from jupyter_core.paths import SYSTEM_JUPYTER_PATH, jupyter_data_dir, jupyter_path from traitlets import Bool, CaselessStrEnum, Dict, HasTraits, List, Set, Type, Unicode, observe from traitlets.config import LoggingConfigurable from .provisioning import KernelProvisionerFactory as KPF # noqa pjoin = os.path.join NATIVE_KERNEL_NAME = "python3" class KernelSpec(HasTraits): """A kernel spec model object.""" argv: List[str] = List() name = Unicode() mimetype = Unicode() display_name = Unicode() language = Unicode() env = Dict() resource_dir = Unicode() interrupt_mode = CaselessStrEnum(["message", "signal"], default_value="signal") metadata = Dict() @classmethod def from_resource_dir(cls: type[KernelSpec], resource_dir: str) -> KernelSpec: """Create a KernelSpec object by reading kernel.json Pass the path to the *directory* containing kernel.json. """ kernel_file = pjoin(resource_dir, "kernel.json") with open(kernel_file, encoding="utf-8") as f: kernel_dict = json.load(f) return cls(resource_dir=resource_dir, **kernel_dict) def to_dict(self) -> dict[str, t.Any]: """Convert the kernel spec to a dict.""" d = { "argv": self.argv, "env": self.env, "display_name": self.display_name, "language": self.language, "interrupt_mode": self.interrupt_mode, "metadata": self.metadata, } return d def to_json(self) -> str: """Serialise this kernelspec to a JSON object. Returns a string. """ return json.dumps(self.to_dict()) _kernel_name_pat = re.compile(r"^[a-z0-9._\-]+$", re.IGNORECASE) def _is_valid_kernel_name(name: str) -> t.Any: """Check that a kernel name is valid.""" # quote is not unicode-safe on Python 2 return _kernel_name_pat.match(name) _kernel_name_description = ( "Kernel names can only contain ASCII letters and numbers and these separators:" " - . _ (hyphen, period, and underscore)." ) def _is_kernel_dir(path: str) -> bool: """Is ``path`` a kernel directory?""" return os.path.isdir(path) and os.path.isfile(pjoin(path, "kernel.json")) def _list_kernels_in(dir: str | None) -> dict[str, str]: """Return a mapping of kernel names to resource directories from dir. If dir is None or does not exist, returns an empty dict. """ if dir is None or not os.path.isdir(dir): return {} kernels = {} for f in os.listdir(dir): path = pjoin(dir, f) if not _is_kernel_dir(path): continue key = f.lower() if not _is_valid_kernel_name(key): warnings.warn( f"Invalid kernelspec directory name ({_kernel_name_description}): {path}", stacklevel=3, ) kernels[key] = path return kernels class NoSuchKernel(KeyError): # noqa """An error raised when there is no kernel of a give name.""" def __init__(self, name: str) -> None: """Initialize the error.""" self.name = name def __str__(self) -> str: return f"No such kernel named {self.name}" class KernelSpecManager(LoggingConfigurable): """A manager for kernel specs.""" kernel_spec_class = Type( KernelSpec, config=True, help="""The kernel spec class. This is configurable to allow subclassing of the KernelSpecManager for customized behavior. """, ) ensure_native_kernel = Bool( True, config=True, help="""If there is no Python kernelspec registered and the IPython kernel is available, ensure it is added to the spec list. """, ) data_dir = Unicode() def _data_dir_default(self) -> str: return jupyter_data_dir() user_kernel_dir = Unicode() def _user_kernel_dir_default(self) -> str: return pjoin(self.data_dir, "kernels") whitelist = Set( config=True, help="""Deprecated, use `KernelSpecManager.allowed_kernelspecs` """, ) allowed_kernelspecs = Set( config=True, help="""List of allowed kernel names. By default, all installed kernels are allowed. """, ) kernel_dirs: List[str] = List( help="List of kernel directories to search. Later ones take priority over earlier." ) _deprecated_aliases = { "whitelist": ("allowed_kernelspecs", "7.0"), } # Method copied from # https://github.com/jupyterhub/jupyterhub/blob/d1a85e53dccfc7b1dd81b0c1985d158cc6b61820/jupyterhub/auth.py#L143-L161 @observe(*list(_deprecated_aliases)) def _deprecated_trait(self, change: t.Any) -> None: """observer for deprecated traits""" old_attr = change.name new_attr, version = self._deprecated_aliases[old_attr] new_value = getattr(self, new_attr) if new_value != change.new: # only warn if different # protects backward-compatible config from warnings # if they set the same value under both names self.log.warning( f"{self.__class__.__name__}.{old_attr} is deprecated in jupyter_client " f"{version}, use {self.__class__.__name__}.{new_attr} instead" ) setattr(self, new_attr, change.new) def _kernel_dirs_default(self) -> list[str]: dirs = jupyter_path("kernels") # At some point, we should stop adding .ipython/kernels to the path, # but the cost to keeping it is very small. try: # this should always be valid on IPython 3+ from IPython.paths import get_ipython_dir dirs.append(os.path.join(get_ipython_dir(), "kernels")) except ModuleNotFoundError: pass return dirs def find_kernel_specs(self) -> dict[str, str]: """Returns a dict mapping kernel names to resource directories.""" d = {} for kernel_dir in self.kernel_dirs: kernels = _list_kernels_in(kernel_dir) for kname, spec in kernels.items(): if kname not in d: self.log.debug("Found kernel %s in %s", kname, kernel_dir) d[kname] = spec if self.ensure_native_kernel and NATIVE_KERNEL_NAME not in d: try: from ipykernel.kernelspec import RESOURCES self.log.debug( "Native kernel (%s) available from %s", NATIVE_KERNEL_NAME, RESOURCES, ) d[NATIVE_KERNEL_NAME] = RESOURCES except ImportError: self.log.warning("Native kernel (%s) is not available", NATIVE_KERNEL_NAME) if self.allowed_kernelspecs: # filter if there's an allow list d = {name: spec for name, spec in d.items() if name in self.allowed_kernelspecs} return d # TODO: Caching? def _get_kernel_spec_by_name(self, kernel_name: str, resource_dir: str) -> KernelSpec: """Returns a :class:`KernelSpec` instance for a given kernel_name and resource_dir. """ kspec = None if kernel_name == NATIVE_KERNEL_NAME: try: from ipykernel.kernelspec import RESOURCES, get_kernel_dict except ImportError: # It should be impossible to reach this, but let's play it safe pass else: if resource_dir == RESOURCES: kdict = get_kernel_dict() kspec = self.kernel_spec_class(resource_dir=resource_dir, **kdict) if not kspec: kspec = self.kernel_spec_class.from_resource_dir(resource_dir) if not KPF.instance(parent=self.parent).is_provisioner_available(kspec): raise NoSuchKernel(kernel_name) return kspec def _find_spec_directory(self, kernel_name: str) -> str | None: """Find the resource directory of a named kernel spec""" for kernel_dir in [kd for kd in self.kernel_dirs if os.path.isdir(kd)]: files = os.listdir(kernel_dir) for f in files: path = pjoin(kernel_dir, f) if f.lower() == kernel_name and _is_kernel_dir(path): return path if kernel_name == NATIVE_KERNEL_NAME: try: from ipykernel.kernelspec import RESOURCES except ImportError: pass else: return RESOURCES return None def get_kernel_spec(self, kernel_name: str) -> KernelSpec: """Returns a :class:`KernelSpec` instance for the given kernel_name. Raises :exc:`NoSuchKernel` if the given kernel name is not found. """ if not _is_valid_kernel_name(kernel_name): self.log.warning( f"Kernelspec name {kernel_name} is invalid: {_kernel_name_description}" ) resource_dir = self._find_spec_directory(kernel_name.lower()) if resource_dir is None: self.log.warning("Kernelspec name %s cannot be found!", kernel_name) raise NoSuchKernel(kernel_name) return self._get_kernel_spec_by_name(kernel_name, resource_dir) def get_all_specs(self) -> dict[str, t.Any]: """Returns a dict mapping kernel names to kernelspecs. Returns a dict of the form:: { 'kernel_name': { 'resource_dir': '/path/to/kernel_name', 'spec': {"the spec itself": ...} }, ... } """ d = self.find_kernel_specs() res = {} for kname, resource_dir in d.items(): try: if self.__class__ is KernelSpecManager: spec = self._get_kernel_spec_by_name(kname, resource_dir) else: # avoid calling private methods in subclasses, # which may have overridden find_kernel_specs # and get_kernel_spec, but not the newer get_all_specs spec = self.get_kernel_spec(kname) res[kname] = {"resource_dir": resource_dir, "spec": spec.to_dict()} except NoSuchKernel: pass # The appropriate warning has already been logged except Exception: self.log.warning("Error loading kernelspec %r", kname, exc_info=True) return res def remove_kernel_spec(self, name: str) -> str: """Remove a kernel spec directory by name. Returns the path that was deleted. """ save_native = self.ensure_native_kernel try: self.ensure_native_kernel = False specs = self.find_kernel_specs() finally: self.ensure_native_kernel = save_native spec_dir = specs[name] self.log.debug("Removing %s", spec_dir) if os.path.islink(spec_dir): os.remove(spec_dir) else: shutil.rmtree(spec_dir) return spec_dir def _get_destination_dir( self, kernel_name: str, user: bool = False, prefix: str | None = None ) -> str: if user: return os.path.join(self.user_kernel_dir, kernel_name) elif prefix: return os.path.join(os.path.abspath(prefix), "share", "jupyter", "kernels", kernel_name) else: return os.path.join(SYSTEM_JUPYTER_PATH[0], "kernels", kernel_name) def install_kernel_spec( self, source_dir: str, kernel_name: str | None = None, user: bool = False, replace: bool | None = None, prefix: str | None = None, ) -> str: """Install a kernel spec by copying its directory. If ``kernel_name`` is not given, the basename of ``source_dir`` will be used. If ``user`` is False, it will attempt to install into the systemwide kernel registry. If the process does not have appropriate permissions, an :exc:`OSError` will be raised. If ``prefix`` is given, the kernelspec will be installed to PREFIX/share/jupyter/kernels/KERNEL_NAME. This can be sys.prefix for installation inside virtual or conda envs. """ source_dir = source_dir.rstrip("/\\") if not kernel_name: kernel_name = os.path.basename(source_dir) kernel_name = kernel_name.lower() if not _is_valid_kernel_name(kernel_name): msg = f"Invalid kernel name {kernel_name!r}. {_kernel_name_description}" raise ValueError(msg) if user and prefix: msg = "Can't specify both user and prefix. Please choose one or the other." raise ValueError(msg) if replace is not None: warnings.warn( "replace is ignored. Installing a kernelspec always replaces an existing " "installation", DeprecationWarning, stacklevel=2, ) destination = self._get_destination_dir(kernel_name, user=user, prefix=prefix) self.log.debug("Installing kernelspec in %s", destination) kernel_dir = os.path.dirname(destination) if kernel_dir not in self.kernel_dirs: self.log.warning( "Installing to %s, which is not in %s. The kernelspec may not be found.", kernel_dir, self.kernel_dirs, ) if os.path.isdir(destination): self.log.info("Removing existing kernelspec in %s", destination) shutil.rmtree(destination) shutil.copytree(source_dir, destination) self.log.info("Installed kernelspec %s in %s", kernel_name, destination) return destination def install_native_kernel_spec(self, user: bool = False) -> None: """DEPRECATED: Use ipykernel.kernelspec.install""" warnings.warn( "install_native_kernel_spec is deprecated. Use ipykernel.kernelspec import install.", stacklevel=2, ) from ipykernel.kernelspec import install install(self, user=user) def find_kernel_specs() -> dict[str, str]: """Returns a dict mapping kernel names to resource directories.""" return KernelSpecManager().find_kernel_specs() def get_kernel_spec(kernel_name: str) -> KernelSpec: """Returns a :class:`KernelSpec` instance for the given kernel_name. Raises KeyError if the given kernel name is not found. """ return KernelSpecManager().get_kernel_spec(kernel_name) def install_kernel_spec( source_dir: str, kernel_name: str | None = None, user: bool = False, replace: bool | None = False, prefix: str | None = None, ) -> str: """Install a kernel spec in a given directory.""" return KernelSpecManager().install_kernel_spec(source_dir, kernel_name, user, replace, prefix) install_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__ def install_native_kernel_spec(user: bool = False) -> None: """Install the native kernel spec.""" KernelSpecManager().install_native_kernel_spec(user=user) install_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__ jupyter_client-8.6.2/jupyter_client/kernelspecapp.py000066400000000000000000000274201462351563100230500ustar00rootroot00000000000000"""Apps for managing kernel specs.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import errno import json import os.path import sys import typing as t from jupyter_core.application import JupyterApp, base_aliases, base_flags from traitlets import Bool, Dict, Instance, List, Unicode from traitlets.config.application import Application from . import __version__ from .kernelspec import KernelSpecManager from .provisioning.factory import KernelProvisionerFactory class ListKernelSpecs(JupyterApp): """An app to list kernel specs.""" version = __version__ description = """List installed kernel specifications.""" kernel_spec_manager = Instance(KernelSpecManager) json_output = Bool( False, help="output spec name and location as machine-readable json.", config=True, ) flags = { "json": ( {"ListKernelSpecs": {"json_output": True}}, "output spec name and location as machine-readable json.", ), "debug": base_flags["debug"], } def _kernel_spec_manager_default(self) -> KernelSpecManager: return KernelSpecManager(parent=self, data_dir=self.data_dir) def start(self) -> dict[str, t.Any] | None: # type:ignore[override] """Start the application.""" paths = self.kernel_spec_manager.find_kernel_specs() specs = self.kernel_spec_manager.get_all_specs() if not self.json_output: if not specs: print("No kernels available") return None # pad to width of longest kernel name name_len = len(sorted(paths, key=lambda name: len(name))[-1]) def path_key(item: t.Any) -> t.Any: """sort key function for Jupyter path priority""" path = item[1] for idx, prefix in enumerate(self.jupyter_path): if path.startswith(prefix): return (idx, path) # not in jupyter path, artificially added to the front return (-1, path) print("Available kernels:") for kernelname, path in sorted(paths.items(), key=path_key): print(f" {kernelname.ljust(name_len)} {path}") else: print(json.dumps({"kernelspecs": specs}, indent=2)) return specs class InstallKernelSpec(JupyterApp): """An app to install a kernel spec.""" version = __version__ description = """Install a kernel specification directory. Given a SOURCE DIRECTORY containing a kernel spec, jupyter will copy that directory into one of the Jupyter kernel directories. The default is to install kernelspecs for all users. `--user` can be specified to install a kernel only for the current user. """ examples = """ jupyter kernelspec install /path/to/my_kernel --user """ usage = "jupyter kernelspec install SOURCE_DIR [--options]" kernel_spec_manager = Instance(KernelSpecManager) def _kernel_spec_manager_default(self) -> KernelSpecManager: return KernelSpecManager(data_dir=self.data_dir) sourcedir = Unicode() kernel_name = Unicode("", config=True, help="Install the kernel spec with this name") def _kernel_name_default(self) -> str: return os.path.basename(self.sourcedir) user = Bool( False, config=True, help=""" Try to install the kernel spec to the per-user directory instead of the system or environment directory. """, ) prefix = Unicode( "", config=True, help="""Specify a prefix to install to, e.g. an env. The kernelspec will be installed in PREFIX/share/jupyter/kernels/ """, ) replace = Bool(False, config=True, help="Replace any existing kernel spec with this name.") aliases = { "name": "InstallKernelSpec.kernel_name", "prefix": "InstallKernelSpec.prefix", } aliases.update(base_aliases) flags = { "user": ( {"InstallKernelSpec": {"user": True}}, "Install to the per-user kernel registry", ), "replace": ( {"InstallKernelSpec": {"replace": True}}, "Replace any existing kernel spec with this name.", ), "sys-prefix": ( {"InstallKernelSpec": {"prefix": sys.prefix}}, "Install to Python's sys.prefix. Useful in conda/virtual environments.", ), "debug": base_flags["debug"], } def parse_command_line(self, argv: None | list[str]) -> None: # type:ignore[override] """Parse the command line args.""" super().parse_command_line(argv) # accept positional arg as profile name if self.extra_args: self.sourcedir = self.extra_args[0] else: print("No source directory specified.", file=sys.stderr) self.exit(1) def start(self) -> None: """Start the application.""" if self.user and self.prefix: self.exit("Can't specify both user and prefix. Please choose one or the other.") try: self.kernel_spec_manager.install_kernel_spec( self.sourcedir, kernel_name=self.kernel_name, user=self.user, prefix=self.prefix, replace=self.replace, ) except OSError as e: if e.errno == errno.EACCES: print(e, file=sys.stderr) if not self.user: print("Perhaps you want to install with `sudo` or `--user`?", file=sys.stderr) self.exit(1) elif e.errno == errno.EEXIST: print(f"A kernel spec is already present at {e.filename}", file=sys.stderr) self.exit(1) raise class RemoveKernelSpec(JupyterApp): """An app to remove a kernel spec.""" version = __version__ description = """Remove one or more Jupyter kernelspecs by name.""" examples = """jupyter kernelspec remove python2 [my_kernel ...]""" force = Bool(False, config=True, help="""Force removal, don't prompt for confirmation.""") spec_names = List(Unicode()) kernel_spec_manager = Instance(KernelSpecManager) def _kernel_spec_manager_default(self) -> KernelSpecManager: return KernelSpecManager(data_dir=self.data_dir, parent=self) flags = { "f": ({"RemoveKernelSpec": {"force": True}}, force.help), } flags.update(JupyterApp.flags) def parse_command_line(self, argv: list[str] | None) -> None: # type:ignore[override] """Parse the command line args.""" super().parse_command_line(argv) # accept positional arg as profile name if self.extra_args: self.spec_names = sorted(set(self.extra_args)) # remove duplicates else: self.exit("No kernelspec specified.") def start(self) -> None: """Start the application.""" self.kernel_spec_manager.ensure_native_kernel = False spec_paths = self.kernel_spec_manager.find_kernel_specs() missing = set(self.spec_names).difference(set(spec_paths)) if missing: self.exit("Couldn't find kernel spec(s): %s" % ", ".join(missing)) if not (self.force or self.answer_yes): print("Kernel specs to remove:") for name in self.spec_names: path = spec_paths.get(name, name) print(f" {name.ljust(20)}\t{path.ljust(20)}") answer = input("Remove %i kernel specs [y/N]: " % len(self.spec_names)) if not answer.lower().startswith("y"): return for kernel_name in self.spec_names: try: path = self.kernel_spec_manager.remove_kernel_spec(kernel_name) except OSError as e: if e.errno == errno.EACCES: print(e, file=sys.stderr) print("Perhaps you want sudo?", file=sys.stderr) self.exit(1) else: raise print(f"Removed {path}") class InstallNativeKernelSpec(JupyterApp): """An app to install the native kernel spec.""" version = __version__ description = """[DEPRECATED] Install the IPython kernel spec directory for this Python.""" kernel_spec_manager = Instance(KernelSpecManager) def _kernel_spec_manager_default(self) -> KernelSpecManager: # pragma: no cover return KernelSpecManager(data_dir=self.data_dir) user = Bool( False, config=True, help=""" Try to install the kernel spec to the per-user directory instead of the system or environment directory. """, ) flags = { "user": ( {"InstallNativeKernelSpec": {"user": True}}, "Install to the per-user kernel registry", ), "debug": base_flags["debug"], } def start(self) -> None: # pragma: no cover """Start the application.""" self.log.warning( "`jupyter kernelspec install-self` is DEPRECATED as of 4.0." " You probably want `ipython kernel install` to install the IPython kernelspec." ) try: from ipykernel import kernelspec except ModuleNotFoundError: print("ipykernel not available, can't install its spec.", file=sys.stderr) self.exit(1) try: kernelspec.install(self.kernel_spec_manager, user=self.user) except OSError as e: if e.errno == errno.EACCES: print(e, file=sys.stderr) if not self.user: print( "Perhaps you want to install with `sudo` or `--user`?", file=sys.stderr, ) self.exit(1) self.exit(e) # type:ignore[arg-type] class ListProvisioners(JupyterApp): """An app to list provisioners.""" version = __version__ description = """List available provisioners for use in kernel specifications.""" def start(self) -> None: """Start the application.""" kfp = KernelProvisionerFactory.instance(parent=self) print("Available kernel provisioners:") provisioners = kfp.get_provisioner_entries() # pad to width of longest kernel name name_len = len(sorted(provisioners, key=lambda name: len(name))[-1]) for name in sorted(provisioners): print(f" {name.ljust(name_len)} {provisioners[name]}") class KernelSpecApp(Application): """An app to manage kernel specs.""" version = __version__ name = "jupyter kernelspec" description = """Manage Jupyter kernel specifications.""" subcommands = Dict( { "list": (ListKernelSpecs, ListKernelSpecs.description.splitlines()[0]), "install": ( InstallKernelSpec, InstallKernelSpec.description.splitlines()[0], ), "uninstall": (RemoveKernelSpec, "Alias for remove"), "remove": (RemoveKernelSpec, RemoveKernelSpec.description.splitlines()[0]), "install-self": ( InstallNativeKernelSpec, InstallNativeKernelSpec.description.splitlines()[0], ), "provisioners": (ListProvisioners, ListProvisioners.description.splitlines()[0]), } ) aliases = {} flags = {} def start(self) -> None: """Start the application.""" if self.subapp is None: print("No subcommand specified. Must specify one of: %s" % list(self.subcommands)) print() self.print_description() self.print_subcommands() self.exit(1) else: return self.subapp.start() if __name__ == "__main__": KernelSpecApp.launch_instance() jupyter_client-8.6.2/jupyter_client/launcher.py000066400000000000000000000144531462351563100220170ustar00rootroot00000000000000"""Utilities for launching kernels""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os import sys import warnings from subprocess import PIPE, Popen from typing import Any, Dict, List, Optional from traitlets.log import get_logger def launch_kernel( cmd: List[str], stdin: Optional[int] = None, stdout: Optional[int] = None, stderr: Optional[int] = None, env: Optional[Dict[str, str]] = None, independent: bool = False, cwd: Optional[str] = None, **kw: Any, ) -> Popen: """Launches a localhost kernel, binding to the specified ports. Parameters ---------- cmd : Popen list, A string of Python code that imports and executes a kernel entry point. stdin, stdout, stderr : optional (default None) Standards streams, as defined in subprocess.Popen. env: dict, optional Environment variables passed to the kernel independent : bool, optional (default False) If set, the kernel process is guaranteed to survive if this process dies. If not set, an effort is made to ensure that the kernel is killed when this process dies. Note that in this case it is still good practice to kill kernels manually before exiting. cwd : path, optional The working dir of the kernel process (default: cwd of this process). **kw: optional Additional arguments for Popen Returns ------- Popen instance for the kernel subprocess """ # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr # are invalid. Unfortunately, there is in general no way to detect whether # they are valid. The following two blocks redirect them to (temporary) # pipes in certain important cases. # If this process has been backgrounded, our stdin is invalid. Since there # is no compelling reason for the kernel to inherit our stdin anyway, we'll # place this one safe and always redirect. redirect_in = True _stdin = PIPE if stdin is None else stdin # If this process in running on pythonw, we know that stdin, stdout, and # stderr are all invalid. redirect_out = sys.executable.endswith("pythonw.exe") if redirect_out: blackhole = open(os.devnull, "w") # noqa _stdout = blackhole if stdout is None else stdout _stderr = blackhole if stderr is None else stderr else: _stdout, _stderr = stdout, stderr env = env if (env is not None) else os.environ.copy() kwargs = kw.copy() main_args = { "stdin": _stdin, "stdout": _stdout, "stderr": _stderr, "cwd": cwd, "env": env, } kwargs.update(main_args) # Spawn a kernel. if sys.platform == "win32": if cwd: kwargs["cwd"] = cwd from .win_interrupt import create_interrupt_event # Create a Win32 event for interrupting the kernel # and store it in an environment variable. interrupt_event = create_interrupt_event() env["JPY_INTERRUPT_EVENT"] = str(interrupt_event) # deprecated old env name: env["IPY_INTERRUPT_EVENT"] = env["JPY_INTERRUPT_EVENT"] try: from _winapi import ( CREATE_NEW_PROCESS_GROUP, DUPLICATE_SAME_ACCESS, DuplicateHandle, GetCurrentProcess, ) except: # noqa from _subprocess import ( CREATE_NEW_PROCESS_GROUP, DUPLICATE_SAME_ACCESS, DuplicateHandle, GetCurrentProcess, ) # create a handle on the parent to be inherited if independent: kwargs["creationflags"] = CREATE_NEW_PROCESS_GROUP else: pid = GetCurrentProcess() handle = DuplicateHandle( pid, pid, pid, 0, True, DUPLICATE_SAME_ACCESS, # Inheritable by new processes. ) env["JPY_PARENT_PID"] = str(int(handle)) # Prevent creating new console window on pythonw if redirect_out: kwargs["creationflags"] = ( kwargs.setdefault("creationflags", 0) | 0x08000000 ) # CREATE_NO_WINDOW # Avoid closing the above parent and interrupt handles. # close_fds is True by default on Python >=3.7 # or when no stream is captured on Python <3.7 # (we always capture stdin, so this is already False by default on <3.7) kwargs["close_fds"] = False else: # Create a new session. # This makes it easier to interrupt the kernel, # because we want to interrupt the whole process group. # We don't use setpgrp, which is known to cause problems for kernels starting # certain interactive subprocesses, such as bash -i. kwargs["start_new_session"] = True if not independent: env["JPY_PARENT_PID"] = str(os.getpid()) try: # Allow to use ~/ in the command or its arguments cmd = [os.path.expanduser(s) for s in cmd] proc = Popen(cmd, **kwargs) # noqa except Exception as ex: try: msg = "Failed to run command:\n{}\n PATH={!r}\n with kwargs:\n{!r}\n" # exclude environment variables, # which may contain access tokens and the like. without_env = {key: value for key, value in kwargs.items() if key != "env"} msg = msg.format(cmd, env.get("PATH", os.defpath), without_env) get_logger().error(msg) except Exception as ex2: # Don't let a formatting/logger issue lead to the wrong exception warnings.warn(f"Failed to run command: '{cmd}' due to exception: {ex}", stacklevel=2) warnings.warn( f"The following exception occurred handling the previous failure: {ex2}", stacklevel=2, ) raise ex if sys.platform == "win32": # Attach the interrupt event to the Popen object so it can be used later. proc.win32_interrupt_event = interrupt_event # Clean up pipes created to work around Popen bug. if redirect_in and stdin is None: assert proc.stdin is not None proc.stdin.close() return proc __all__ = [ "launch_kernel", ] jupyter_client-8.6.2/jupyter_client/localinterfaces.py000066400000000000000000000203751462351563100233540ustar00rootroot00000000000000"""Utilities for identifying local IP addresses.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import os import re import socket import subprocess from subprocess import PIPE, Popen from typing import Any, Callable, Iterable, Sequence from warnings import warn LOCAL_IPS: list = [] PUBLIC_IPS: list = [] LOCALHOST: str = "" def _uniq_stable(elems: Iterable) -> list: """uniq_stable(elems) -> list Return from an iterable, a list of all the unique elements in the input, maintaining the order in which they first appear. """ seen = set() value = [] for x in elems: if x not in seen: value.append(x) seen.add(x) return value def _get_output(cmd: str | Sequence[str]) -> str: """Get output of a command, raising IOError if it fails""" startupinfo = None if os.name == "nt": startupinfo = subprocess.STARTUPINFO() # type:ignore[attr-defined] startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type:ignore[attr-defined] p = Popen(cmd, stdout=PIPE, stderr=PIPE, startupinfo=startupinfo) # noqa stdout, stderr = p.communicate() if p.returncode: msg = "Failed to run {}: {}".format(cmd, stderr.decode("utf8", "replace")) raise OSError(msg) return stdout.decode("utf8", "replace") def _only_once(f: Callable) -> Callable: """decorator to only run a function once""" f.called = False # type:ignore[attr-defined] def wrapped(**kwargs: Any) -> Any: if f.called: # type:ignore[attr-defined] return ret = f(**kwargs) f.called = True # type:ignore[attr-defined] return ret return wrapped def _requires_ips(f: Callable) -> Callable: """decorator to ensure load_ips has been run before f""" def ips_loaded(*args: Any, **kwargs: Any) -> Any: _load_ips() return f(*args, **kwargs) return ips_loaded # subprocess-parsing ip finders class NoIPAddresses(Exception): # noqa pass def _populate_from_list(addrs: Sequence[str] | None) -> None: """populate local and public IPs from flat list of all IPs""" if not addrs: raise NoIPAddresses global LOCALHOST public_ips = [] local_ips = [] for ip in addrs: local_ips.append(ip) if not ip.startswith("127."): public_ips.append(ip) elif not LOCALHOST: LOCALHOST = ip if not LOCALHOST or LOCALHOST == "127.0.0.1": LOCALHOST = "127.0.0.1" local_ips.insert(0, LOCALHOST) local_ips.extend(["0.0.0.0", ""]) # noqa LOCAL_IPS[:] = _uniq_stable(local_ips) PUBLIC_IPS[:] = _uniq_stable(public_ips) _ifconfig_ipv4_pat = re.compile(r"inet\b.*?(\d+\.\d+\.\d+\.\d+)", re.IGNORECASE) def _load_ips_ifconfig() -> None: """load ip addresses from `ifconfig` output (posix)""" try: out = _get_output("ifconfig") except OSError: # no ifconfig, it's usually in /sbin and /sbin is not on everyone's PATH out = _get_output("/sbin/ifconfig") lines = out.splitlines() addrs = [] for line in lines: m = _ifconfig_ipv4_pat.match(line.strip()) if m: addrs.append(m.group(1)) _populate_from_list(addrs) def _load_ips_ip() -> None: """load ip addresses from `ip addr` output (Linux)""" out = _get_output(["ip", "-f", "inet", "addr"]) lines = out.splitlines() addrs = [] for line in lines: blocks = line.lower().split() if (len(blocks) >= 2) and (blocks[0] == "inet"): addrs.append(blocks[1].split("/")[0]) _populate_from_list(addrs) _ipconfig_ipv4_pat = re.compile(r"ipv4.*?(\d+\.\d+\.\d+\.\d+)$", re.IGNORECASE) def _load_ips_ipconfig() -> None: """load ip addresses from `ipconfig` output (Windows)""" out = _get_output("ipconfig") lines = out.splitlines() addrs = [] for line in lines: m = _ipconfig_ipv4_pat.match(line.strip()) if m: addrs.append(m.group(1)) _populate_from_list(addrs) def _load_ips_netifaces() -> None: """load ip addresses with netifaces""" import netifaces # type: ignore[import-not-found] global LOCALHOST local_ips = [] public_ips = [] # list of iface names, 'lo0', 'eth0', etc. for iface in netifaces.interfaces(): # list of ipv4 addrinfo dicts ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, []) for entry in ipv4s: addr = entry.get("addr") if not addr: continue if not (iface.startswith("lo") or addr.startswith("127.")): public_ips.append(addr) elif not LOCALHOST: LOCALHOST = addr local_ips.append(addr) if not LOCALHOST: # we never found a loopback interface (can this ever happen?), assume common default LOCALHOST = "127.0.0.1" local_ips.insert(0, LOCALHOST) local_ips.extend(["0.0.0.0", ""]) # noqa LOCAL_IPS[:] = _uniq_stable(local_ips) PUBLIC_IPS[:] = _uniq_stable(public_ips) def _load_ips_gethostbyname() -> None: """load ip addresses with socket.gethostbyname_ex This can be slow. """ global LOCALHOST try: LOCAL_IPS[:] = socket.gethostbyname_ex("localhost")[2] except OSError: # assume common default LOCAL_IPS[:] = ["127.0.0.1"] try: hostname = socket.gethostname() PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2] # try hostname.local, in case hostname has been short-circuited to loopback if not hostname.endswith(".local") and all(ip.startswith("127") for ip in PUBLIC_IPS): PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + ".local")[2] except OSError: pass finally: PUBLIC_IPS[:] = _uniq_stable(PUBLIC_IPS) LOCAL_IPS.extend(PUBLIC_IPS) # include all-interface aliases: 0.0.0.0 and '' LOCAL_IPS.extend(["0.0.0.0", ""]) # noqa LOCAL_IPS[:] = _uniq_stable(LOCAL_IPS) LOCALHOST = LOCAL_IPS[0] def _load_ips_dumb() -> None: """Fallback in case of unexpected failure""" global LOCALHOST LOCALHOST = "127.0.0.1" LOCAL_IPS[:] = [LOCALHOST, "0.0.0.0", ""] # noqa PUBLIC_IPS[:] = [] @_only_once def _load_ips(suppress_exceptions: bool = True) -> None: """load the IPs that point to this machine This function will only ever be called once. It will use netifaces to do it quickly if available. Then it will fallback on parsing the output of ifconfig / ip addr / ipconfig, as appropriate. Finally, it will fallback on socket.gethostbyname_ex, which can be slow. """ try: # first priority, use netifaces try: return _load_ips_netifaces() except ImportError: pass # second priority, parse subprocess output (how reliable is this?) if os.name == "nt": try: return _load_ips_ipconfig() except (OSError, NoIPAddresses): pass else: try: return _load_ips_ip() except (OSError, NoIPAddresses): pass try: return _load_ips_ifconfig() except (OSError, NoIPAddresses): pass # lowest priority, use gethostbyname return _load_ips_gethostbyname() except Exception as e: if not suppress_exceptions: raise # unexpected error shouldn't crash, load dumb default values instead. warn("Unexpected error discovering local network interfaces: %s" % e, stacklevel=2) _load_ips_dumb() @_requires_ips def local_ips() -> list[str]: """return the IP addresses that point to this machine""" return LOCAL_IPS @_requires_ips def public_ips() -> list[str]: """return the IP addresses for this machine that are visible to other machines""" return PUBLIC_IPS @_requires_ips def localhost() -> str: """return ip for localhost (almost always 127.0.0.1)""" return LOCALHOST @_requires_ips def is_local_ip(ip: str) -> bool: """does `ip` point to this machine?""" return ip in LOCAL_IPS @_requires_ips def is_public_ip(ip: str) -> bool: """is `ip` a publicly visible address?""" return ip in PUBLIC_IPS jupyter_client-8.6.2/jupyter_client/manager.py000066400000000000000000000731621462351563100216320ustar00rootroot00000000000000"""Base class to manage a running kernel""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import asyncio import functools import os import re import signal import sys import typing as t import uuid import warnings from asyncio.futures import Future from concurrent.futures import Future as CFuture from contextlib import contextmanager from enum import Enum import zmq from jupyter_core.utils import run_sync from traitlets import ( Any, Bool, Dict, DottedObjectName, Float, Instance, Type, Unicode, default, observe, observe_compat, ) from traitlets.utils.importstring import import_item from . import kernelspec from .asynchronous import AsyncKernelClient from .blocking import BlockingKernelClient from .client import KernelClient from .connect import ConnectionFileMixin from .managerabc import KernelManagerABC from .provisioning import KernelProvisionerBase from .provisioning import KernelProvisionerFactory as KPF # noqa class _ShutdownStatus(Enum): """ This is so far used only for testing in order to track the internal state of the shutdown logic, and verifying which path is taken for which missbehavior. """ Unset = None ShutdownRequest = "ShutdownRequest" SigtermRequest = "SigtermRequest" SigkillRequest = "SigkillRequest" F = t.TypeVar("F", bound=t.Callable[..., t.Any]) def _get_future() -> t.Union[Future, CFuture]: """Get an appropriate Future object""" try: asyncio.get_running_loop() return Future() except RuntimeError: # No event loop running, use concurrent future return CFuture() def in_pending_state(method: F) -> F: """Sets the kernel to a pending state by creating a fresh Future for the KernelManager's `ready` attribute. Once the method is finished, set the Future's results. """ @t.no_type_check @functools.wraps(method) async def wrapper(self: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Any: """Create a future for the decorated method.""" if self._attempted_start or not self._ready: self._ready = _get_future() try: # call wrapped method, await, and set the result or exception. out = await method(self, *args, **kwargs) # Add a small sleep to ensure tests can capture the state before done await asyncio.sleep(0.01) if self.owns_kernel: self._ready.set_result(None) return out except Exception as e: self._ready.set_exception(e) self.log.exception(self._ready.exception()) raise e return t.cast(F, wrapper) class KernelManager(ConnectionFileMixin): """Manages a single kernel in a subprocess on this host. This version starts kernels with Popen. """ _ready: t.Optional[t.Union[Future, CFuture]] def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: """Initialize a kernel manager.""" if args: warnings.warn( "Passing positional only arguments to " "`KernelManager.__init__` is deprecated since jupyter_client" " 8.6, and will become an error on future versions. Positional " " arguments have been ignored since jupyter_client 7.0", DeprecationWarning, stacklevel=2, ) self._owns_kernel = kwargs.pop("owns_kernel", True) super().__init__(**kwargs) self._shutdown_status = _ShutdownStatus.Unset self._attempted_start = False self._ready = None _created_context: Bool = Bool(False) # The PyZMQ Context to use for communication with the kernel. context: Instance = Instance(zmq.Context) @default("context") def _context_default(self) -> zmq.Context: self._created_context = True return zmq.Context() # the class to create with our `client` method client_class: DottedObjectName = DottedObjectName( "jupyter_client.blocking.BlockingKernelClient" ) client_factory: Type = Type(klass=KernelClient) @default("client_factory") def _client_factory_default(self) -> Type: return import_item(self.client_class) @observe("client_class") def _client_class_changed(self, change: t.Dict[str, DottedObjectName]) -> None: self.client_factory = import_item(str(change["new"])) kernel_id: t.Union[str, Unicode] = Unicode(None, allow_none=True) # The kernel provisioner with which this KernelManager is communicating. # This will generally be a LocalProvisioner instance unless the kernelspec # indicates otherwise. provisioner: t.Optional[KernelProvisionerBase] = None kernel_spec_manager: Instance = Instance(kernelspec.KernelSpecManager) @default("kernel_spec_manager") def _kernel_spec_manager_default(self) -> kernelspec.KernelSpecManager: return kernelspec.KernelSpecManager(data_dir=self.data_dir) @observe("kernel_spec_manager") @observe_compat def _kernel_spec_manager_changed(self, change: t.Dict[str, Instance]) -> None: self._kernel_spec = None shutdown_wait_time: Float = Float( 5.0, config=True, help="Time to wait for a kernel to terminate before killing it, " "in seconds. When a shutdown request is initiated, the kernel " "will be immediately sent an interrupt (SIGINT), followed" "by a shutdown_request message, after 1/2 of `shutdown_wait_time`" "it will be sent a terminate (SIGTERM) request, and finally at " "the end of `shutdown_wait_time` will be killed (SIGKILL). terminate " "and kill may be equivalent on windows. Note that this value can be" "overridden by the in-use kernel provisioner since shutdown times may" "vary by provisioned environment.", ) kernel_name: t.Union[str, Unicode] = Unicode(kernelspec.NATIVE_KERNEL_NAME) @observe("kernel_name") def _kernel_name_changed(self, change: t.Dict[str, str]) -> None: self._kernel_spec = None if change["new"] == "python": self.kernel_name = kernelspec.NATIVE_KERNEL_NAME _kernel_spec: t.Optional[kernelspec.KernelSpec] = None @property def kernel_spec(self) -> t.Optional[kernelspec.KernelSpec]: if self._kernel_spec is None and self.kernel_name != "": self._kernel_spec = self.kernel_spec_manager.get_kernel_spec(self.kernel_name) return self._kernel_spec cache_ports: Bool = Bool( False, config=True, help="True if the MultiKernelManager should cache ports for this KernelManager instance", ) @default("cache_ports") def _default_cache_ports(self) -> bool: return self.transport == "tcp" @property def ready(self) -> t.Union[CFuture, Future]: """A future that resolves when the kernel process has started for the first time""" if not self._ready: self._ready = _get_future() return self._ready @property def ipykernel(self) -> bool: return self.kernel_name in {"python", "python2", "python3"} # Protected traits _launch_args: t.Optional["Dict[str, Any]"] = Dict(allow_none=True) _control_socket: Any = Any() _restarter: Any = Any() autorestart: Bool = Bool( True, config=True, help="""Should we autorestart the kernel if it dies.""" ) shutting_down: bool = False def __del__(self) -> None: self._close_control_socket() self.cleanup_connection_file() # -------------------------------------------------------------------------- # Kernel restarter # -------------------------------------------------------------------------- def start_restarter(self) -> None: """Start the kernel restarter.""" pass def stop_restarter(self) -> None: """Stop the kernel restarter.""" pass def add_restart_callback(self, callback: t.Callable, event: str = "restart") -> None: """Register a callback to be called when a kernel is restarted""" if self._restarter is None: return self._restarter.add_callback(callback, event) def remove_restart_callback(self, callback: t.Callable, event: str = "restart") -> None: """Unregister a callback to be called when a kernel is restarted""" if self._restarter is None: return self._restarter.remove_callback(callback, event) # -------------------------------------------------------------------------- # create a Client connected to our Kernel # -------------------------------------------------------------------------- def client(self, **kwargs: t.Any) -> BlockingKernelClient: """Create a client configured to connect to our kernel""" kw: dict = {} kw.update(self.get_connection_info(session=True)) kw.update( { "connection_file": self.connection_file, "parent": self, } ) # add kwargs last, for manual overrides kw.update(kwargs) return self.client_factory(**kw) # -------------------------------------------------------------------------- # Kernel management # -------------------------------------------------------------------------- def update_env(self, *, env: t.Dict[str, str]) -> None: """ Allow to update the environment of a kernel manager. This will take effect only after kernel restart when the new env is passed to the new kernel. This is useful as some of the information of the current kernel reflect the state of the session that started it, and those session information (like the attach file path, or name), are mutable. .. version-added: 8.5 """ # Mypy think this is unreachable as it see _launch_args as Dict, not t.Dict if ( isinstance(self._launch_args, dict) and "env" in self._launch_args and isinstance(self._launch_args["env"], dict) # type: ignore [unreachable] ): self._launch_args["env"].update(env) # type: ignore [unreachable] def format_kernel_cmd(self, extra_arguments: t.Optional[t.List[str]] = None) -> t.List[str]: """Replace templated args (e.g. {connection_file})""" extra_arguments = extra_arguments or [] assert self.kernel_spec is not None cmd = self.kernel_spec.argv + extra_arguments if cmd and cmd[0] in { "python", "python%i" % sys.version_info[0], "python%i.%i" % sys.version_info[:2], }: # executable is 'python' or 'python3', use sys.executable. # These will typically be the same, # but if the current process is in an env # and has been launched by abspath without # activating the env, python on PATH may not be sys.executable, # but it should be. cmd[0] = sys.executable # Make sure to use the realpath for the connection_file # On windows, when running with the store python, the connection_file path # is not usable by non python kernels because the path is being rerouted when # inside of a store app. # See this bug here: https://bugs.python.org/issue41196 ns: t.Dict[str, t.Any] = { "connection_file": os.path.realpath(self.connection_file), "prefix": sys.prefix, } if self.kernel_spec: # type:ignore[truthy-bool] ns["resource_dir"] = self.kernel_spec.resource_dir assert isinstance(self._launch_args, dict) ns.update(self._launch_args) pat = re.compile(r"\{([A-Za-z0-9_]+)\}") def from_ns(match: t.Any) -> t.Any: """Get the key out of ns if it's there, otherwise no change.""" return ns.get(match.group(1), match.group()) return [pat.sub(from_ns, arg) for arg in cmd] async def _async_launch_kernel(self, kernel_cmd: t.List[str], **kw: t.Any) -> None: """actually launch the kernel override in a subclass to launch kernel subprocesses differently Note that provisioners can now be used to customize kernel environments and """ assert self.provisioner is not None connection_info = await self.provisioner.launch_kernel(kernel_cmd, **kw) assert self.provisioner.has_process # Provisioner provides the connection information. Load into kernel manager # and write the connection file, if not already done. self._reconcile_connection_info(connection_info) _launch_kernel = run_sync(_async_launch_kernel) # Control socket used for polite kernel shutdown def _connect_control_socket(self) -> None: if self._control_socket is None: self._control_socket = self._create_connected_socket("control") self._control_socket.linger = 100 def _close_control_socket(self) -> None: if self._control_socket is None: return self._control_socket.close() self._control_socket = None async def _async_pre_start_kernel( self, **kw: t.Any ) -> t.Tuple[t.List[str], t.Dict[str, t.Any]]: """Prepares a kernel for startup in a separate process. If random ports (port=0) are being used, this method must be called before the channels are created. Parameters ---------- `**kw` : optional keyword arguments that are passed down to build the kernel_cmd and launching the kernel (e.g. Popen kwargs). """ self.shutting_down = False self.kernel_id = self.kernel_id or kw.pop("kernel_id", str(uuid.uuid4())) # save kwargs for use in restart # assigning Traitlets Dicts to Dict make mypy unhappy but is ok self._launch_args = kw.copy() # type:ignore [assignment] if self.provisioner is None: # will not be None on restarts self.provisioner = KPF.instance(parent=self.parent).create_provisioner_instance( self.kernel_id, self.kernel_spec, parent=self, ) kw = await self.provisioner.pre_launch(**kw) kernel_cmd = kw.pop("cmd") return kernel_cmd, kw pre_start_kernel = run_sync(_async_pre_start_kernel) async def _async_post_start_kernel(self, **kw: t.Any) -> None: """Performs any post startup tasks relative to the kernel. Parameters ---------- `**kw` : optional keyword arguments that were used in the kernel process's launch. """ self.start_restarter() self._connect_control_socket() assert self.provisioner is not None await self.provisioner.post_launch(**kw) post_start_kernel = run_sync(_async_post_start_kernel) @in_pending_state async def _async_start_kernel(self, **kw: t.Any) -> None: """Starts a kernel on this host in a separate process. If random ports (port=0) are being used, this method must be called before the channels are created. Parameters ---------- `**kw` : optional keyword arguments that are passed down to build the kernel_cmd and launching the kernel (e.g. Popen kwargs). """ self._attempted_start = True kernel_cmd, kw = await self._async_pre_start_kernel(**kw) # launch the kernel subprocess self.log.debug("Starting kernel: %s", kernel_cmd) await self._async_launch_kernel(kernel_cmd, **kw) await self._async_post_start_kernel(**kw) start_kernel = run_sync(_async_start_kernel) async def _async_request_shutdown(self, restart: bool = False) -> None: """Send a shutdown request via control channel""" content = {"restart": restart} msg = self.session.msg("shutdown_request", content=content) # ensure control socket is connected self._connect_control_socket() self.session.send(self._control_socket, msg) assert self.provisioner is not None await self.provisioner.shutdown_requested(restart=restart) self._shutdown_status = _ShutdownStatus.ShutdownRequest request_shutdown = run_sync(_async_request_shutdown) async def _async_finish_shutdown( self, waittime: t.Optional[float] = None, pollinterval: float = 0.1, restart: bool = False, ) -> None: """Wait for kernel shutdown, then kill process if it doesn't shutdown. This does not send shutdown requests - use :meth:`request_shutdown` first. """ if waittime is None: waittime = max(self.shutdown_wait_time, 0) if self.provisioner: # Allow provisioner to override waittime = self.provisioner.get_shutdown_wait_time(recommended=waittime) try: await asyncio.wait_for( self._async_wait(pollinterval=pollinterval), timeout=waittime / 2 ) except asyncio.TimeoutError: self.log.debug("Kernel is taking too long to finish, terminating") self._shutdown_status = _ShutdownStatus.SigtermRequest await self._async_send_kernel_sigterm() try: await asyncio.wait_for( self._async_wait(pollinterval=pollinterval), timeout=waittime / 2 ) except asyncio.TimeoutError: self.log.debug("Kernel is taking too long to finish, killing") self._shutdown_status = _ShutdownStatus.SigkillRequest await self._async_kill_kernel(restart=restart) else: # Process is no longer alive, wait and clear if self.has_kernel: assert self.provisioner is not None await self.provisioner.wait() finish_shutdown = run_sync(_async_finish_shutdown) async def _async_cleanup_resources(self, restart: bool = False) -> None: """Clean up resources when the kernel is shut down""" if not restart: self.cleanup_connection_file() self.cleanup_ipc_files() self._close_control_socket() self.session.parent = None if self._created_context and not restart: self.context.destroy(linger=100) if self.provisioner: await self.provisioner.cleanup(restart=restart) cleanup_resources = run_sync(_async_cleanup_resources) @in_pending_state async def _async_shutdown_kernel(self, now: bool = False, restart: bool = False) -> None: """Attempts to stop the kernel process cleanly. This attempts to shutdown the kernels cleanly by: 1. Sending it a shutdown message over the control channel. 2. If that fails, the kernel is shutdown forcibly by sending it a signal. Parameters ---------- now : bool Should the kernel be forcible killed *now*. This skips the first, nice shutdown attempt. restart: bool Will this kernel be restarted after it is shutdown. When this is True, connection files will not be cleaned up. """ if not self.owns_kernel: return self.shutting_down = True # Used by restarter to prevent race condition # Stop monitoring for restarting while we shutdown. self.stop_restarter() if self.has_kernel: await self._async_interrupt_kernel() if now: await self._async_kill_kernel() else: await self._async_request_shutdown(restart=restart) # Don't send any additional kernel kill messages immediately, to give # the kernel a chance to properly execute shutdown actions. Wait for at # most 1s, checking every 0.1s. await self._async_finish_shutdown(restart=restart) await self._async_cleanup_resources(restart=restart) shutdown_kernel = run_sync(_async_shutdown_kernel) async def _async_restart_kernel( self, now: bool = False, newports: bool = False, **kw: t.Any ) -> None: """Restarts a kernel with the arguments that were used to launch it. Parameters ---------- now : bool, optional If True, the kernel is forcefully restarted *immediately*, without having a chance to do any cleanup action. Otherwise the kernel is given 1s to clean up before a forceful restart is issued. In all cases the kernel is restarted, the only difference is whether it is given a chance to perform a clean shutdown or not. newports : bool, optional If the old kernel was launched with random ports, this flag decides whether the same ports and connection file will be used again. If False, the same ports and connection file are used. This is the default. If True, new random port numbers are chosen and a new connection file is written. It is still possible that the newly chosen random port numbers happen to be the same as the old ones. `**kw` : optional Any options specified here will overwrite those used to launch the kernel. """ if self._launch_args is None: msg = "Cannot restart the kernel. No previous call to 'start_kernel'." raise RuntimeError(msg) # Stop currently running kernel. await self._async_shutdown_kernel(now=now, restart=True) if newports: self.cleanup_random_ports() # Start new kernel. self._launch_args.update(kw) await self._async_start_kernel(**self._launch_args) restart_kernel = run_sync(_async_restart_kernel) @property def owns_kernel(self) -> bool: return self._owns_kernel @property def has_kernel(self) -> bool: """Has a kernel process been started that we are actively managing.""" return self.provisioner is not None and self.provisioner.has_process async def _async_send_kernel_sigterm(self, restart: bool = False) -> None: """similar to _kill_kernel, but with sigterm (not sigkill), but do not block""" if self.has_kernel: assert self.provisioner is not None await self.provisioner.terminate(restart=restart) _send_kernel_sigterm = run_sync(_async_send_kernel_sigterm) async def _async_kill_kernel(self, restart: bool = False) -> None: """Kill the running kernel. This is a private method, callers should use shutdown_kernel(now=True). """ if self.has_kernel: assert self.provisioner is not None await self.provisioner.kill(restart=restart) # Wait until the kernel terminates. try: await asyncio.wait_for(self._async_wait(), timeout=5.0) except asyncio.TimeoutError: # Wait timed out, just log warning but continue - not much more we can do. self.log.warning("Wait for final termination of kernel timed out - continuing...") pass else: # Process is no longer alive, wait and clear if self.has_kernel: await self.provisioner.wait() _kill_kernel = run_sync(_async_kill_kernel) async def _async_interrupt_kernel(self) -> None: """Interrupts the kernel by sending it a signal. Unlike ``signal_kernel``, this operation is well supported on all platforms. """ if not self.has_kernel and self._ready is not None: if isinstance(self._ready, CFuture): ready = asyncio.ensure_future(t.cast(Future[t.Any], self._ready)) else: ready = self._ready # Wait for a shutdown if one is in progress. if self.shutting_down: await ready # Wait for a startup. await ready if self.has_kernel: assert self.kernel_spec is not None interrupt_mode = self.kernel_spec.interrupt_mode if interrupt_mode == "signal": await self._async_signal_kernel(signal.SIGINT) elif interrupt_mode == "message": msg = self.session.msg("interrupt_request", content={}) self._connect_control_socket() self.session.send(self._control_socket, msg) else: msg = "Cannot interrupt kernel. No kernel is running!" raise RuntimeError(msg) interrupt_kernel = run_sync(_async_interrupt_kernel) async def _async_signal_kernel(self, signum: int) -> None: """Sends a signal to the process group of the kernel (this usually includes the kernel and any subprocesses spawned by the kernel). Note that since only SIGTERM is supported on Windows, this function is only useful on Unix systems. """ if self.has_kernel: assert self.provisioner is not None await self.provisioner.send_signal(signum) else: msg = "Cannot signal kernel. No kernel is running!" raise RuntimeError(msg) signal_kernel = run_sync(_async_signal_kernel) async def _async_is_alive(self) -> bool: """Is the kernel process still running?""" if not self.owns_kernel: return True if self.has_kernel: assert self.provisioner is not None ret = await self.provisioner.poll() if ret is None: return True return False is_alive = run_sync(_async_is_alive) async def _async_wait(self, pollinterval: float = 0.1) -> None: # Use busy loop at 100ms intervals, polling until the process is # not alive. If we find the process is no longer alive, complete # its cleanup via the blocking wait(). Callers are responsible for # issuing calls to wait() using a timeout (see _kill_kernel()). while await self._async_is_alive(): await asyncio.sleep(pollinterval) class AsyncKernelManager(KernelManager): """An async kernel manager.""" # the class to create with our `client` method client_class: DottedObjectName = DottedObjectName( "jupyter_client.asynchronous.AsyncKernelClient" ) client_factory: Type = Type(klass="jupyter_client.asynchronous.AsyncKernelClient") # The PyZMQ Context to use for communication with the kernel. context: Instance = Instance(zmq.asyncio.Context) @default("context") def _context_default(self) -> zmq.asyncio.Context: self._created_context = True return zmq.asyncio.Context() def client( # type:ignore[override] self, **kwargs: t.Any ) -> AsyncKernelClient: """Get a client for the manager.""" return super().client(**kwargs) # type:ignore[return-value] _launch_kernel = KernelManager._async_launch_kernel # type:ignore[assignment] start_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_start_kernel # type:ignore[assignment] pre_start_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_pre_start_kernel # type:ignore[assignment] post_start_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_post_start_kernel # type:ignore[assignment] request_shutdown: t.Callable[..., t.Awaitable] = KernelManager._async_request_shutdown # type:ignore[assignment] finish_shutdown: t.Callable[..., t.Awaitable] = KernelManager._async_finish_shutdown # type:ignore[assignment] cleanup_resources: t.Callable[..., t.Awaitable] = KernelManager._async_cleanup_resources # type:ignore[assignment] shutdown_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_shutdown_kernel # type:ignore[assignment] restart_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_restart_kernel # type:ignore[assignment] _send_kernel_sigterm = KernelManager._async_send_kernel_sigterm # type:ignore[assignment] _kill_kernel = KernelManager._async_kill_kernel # type:ignore[assignment] interrupt_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_interrupt_kernel # type:ignore[assignment] signal_kernel: t.Callable[..., t.Awaitable] = KernelManager._async_signal_kernel # type:ignore[assignment] is_alive: t.Callable[..., t.Awaitable] = KernelManager._async_is_alive # type:ignore[assignment] KernelManagerABC.register(KernelManager) def start_new_kernel( startup_timeout: float = 60, kernel_name: str = "python", **kwargs: t.Any ) -> t.Tuple[KernelManager, BlockingKernelClient]: """Start a new kernel, and return its Manager and Client""" km = KernelManager(kernel_name=kernel_name) km.start_kernel(**kwargs) kc = km.client() kc.start_channels() try: kc.wait_for_ready(timeout=startup_timeout) except RuntimeError: kc.stop_channels() km.shutdown_kernel() raise return km, kc async def start_new_async_kernel( startup_timeout: float = 60, kernel_name: str = "python", **kwargs: t.Any ) -> t.Tuple[AsyncKernelManager, AsyncKernelClient]: """Start a new kernel, and return its Manager and Client""" km = AsyncKernelManager(kernel_name=kernel_name) await km.start_kernel(**kwargs) kc = km.client() kc.start_channels() try: await kc.wait_for_ready(timeout=startup_timeout) except RuntimeError: kc.stop_channels() await km.shutdown_kernel() raise return (km, kc) @contextmanager def run_kernel(**kwargs: t.Any) -> t.Iterator[KernelClient]: """Context manager to create a kernel in a subprocess. The kernel is shut down when the context exits. Returns ------- kernel_client: connected KernelClient instance """ km, kc = start_new_kernel(**kwargs) try: yield kc finally: kc.stop_channels() km.shutdown_kernel(now=True) jupyter_client-8.6.2/jupyter_client/managerabc.py000066400000000000000000000027221462351563100222720ustar00rootroot00000000000000"""Abstract base class for kernel managers.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import abc from typing import Any class KernelManagerABC(metaclass=abc.ABCMeta): """KernelManager ABC. The docstrings for this class can be found in the base implementation: `jupyter_client.manager.KernelManager` """ @abc.abstractproperty def kernel(self) -> Any: pass # -------------------------------------------------------------------------- # Kernel management # -------------------------------------------------------------------------- @abc.abstractmethod def start_kernel(self, **kw: Any) -> None: """Start the kernel.""" pass @abc.abstractmethod def shutdown_kernel(self, now: bool = False, restart: bool = False) -> None: """Shut down the kernel.""" pass @abc.abstractmethod def restart_kernel(self, now: bool = False, **kw: Any) -> None: """Restart the kernel.""" pass @abc.abstractproperty def has_kernel(self) -> bool: pass @abc.abstractmethod def interrupt_kernel(self) -> None: """Interrupt the kernel.""" pass @abc.abstractmethod def signal_kernel(self, signum: int) -> None: """Send a signal to the kernel.""" pass @abc.abstractmethod def is_alive(self) -> bool: """Test whether the kernel is alive.""" pass jupyter_client-8.6.2/jupyter_client/multikernelmanager.py000066400000000000000000000541111462351563100240770ustar00rootroot00000000000000"""A kernel manager for multiple kernels""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import asyncio import json import os import socket import typing as t import uuid from functools import wraps from pathlib import Path import zmq from traitlets import Any, Bool, Dict, DottedObjectName, Instance, Unicode, default, observe from traitlets.config.configurable import LoggingConfigurable from traitlets.utils.importstring import import_item from .connect import KernelConnectionInfo from .kernelspec import NATIVE_KERNEL_NAME, KernelSpecManager from .manager import KernelManager from .utils import ensure_async, run_sync, utcnow class DuplicateKernelError(Exception): pass def kernel_method(f: t.Callable) -> t.Callable: """decorator for proxying MKM.method(kernel_id) to individual KMs by ID""" @wraps(f) def wrapped( self: t.Any, kernel_id: str, *args: t.Any, **kwargs: t.Any ) -> t.Callable | t.Awaitable: # get the kernel km = self.get_kernel(kernel_id) method = getattr(km, f.__name__) # call the kernel's method r = method(*args, **kwargs) # last thing, call anything defined in the actual class method # such as logging messages f(self, kernel_id, *args, **kwargs) # return the method result return r return wrapped class MultiKernelManager(LoggingConfigurable): """A class for managing multiple kernels.""" default_kernel_name = Unicode( NATIVE_KERNEL_NAME, help="The name of the default kernel to start" ).tag(config=True) kernel_spec_manager = Instance(KernelSpecManager, allow_none=True) kernel_manager_class = DottedObjectName( "jupyter_client.ioloop.IOLoopKernelManager", help="""The kernel manager class. This is configurable to allow subclassing of the KernelManager for customized behavior. """, ).tag(config=True) @observe("kernel_manager_class") def _kernel_manager_class_changed(self, change: t.Any) -> None: self.kernel_manager_factory = self._create_kernel_manager_factory() kernel_manager_factory = Any(help="this is kernel_manager_class after import") @default("kernel_manager_factory") def _kernel_manager_factory_default(self) -> t.Callable: return self._create_kernel_manager_factory() def _create_kernel_manager_factory(self) -> t.Callable: kernel_manager_ctor = import_item(self.kernel_manager_class) def create_kernel_manager(*args: t.Any, **kwargs: t.Any) -> KernelManager: if self.shared_context: if self.context.closed: # recreate context if closed self.context = self._context_default() kwargs.setdefault("context", self.context) km = kernel_manager_ctor(*args, **kwargs) return km return create_kernel_manager shared_context = Bool( True, help="Share a single zmq.Context to talk to all my kernels", ).tag(config=True) context = Instance("zmq.Context") _created_context = Bool(False) _pending_kernels = Dict() @property def _starting_kernels(self) -> dict: """A shim for backwards compatibility.""" return self._pending_kernels @default("context") def _context_default(self) -> zmq.Context: self._created_context = True return zmq.Context() connection_dir = Unicode("") external_connection_dir = Unicode(None, allow_none=True) _kernels = Dict() def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: super().__init__(*args, **kwargs) self.kernel_id_to_connection_file: dict[str, Path] = {} def __del__(self) -> None: """Handle garbage collection. Destroy context if applicable.""" if self._created_context and self.context and not self.context.closed: if self.log: self.log.debug("Destroying zmq context for %s", self) self.context.destroy() try: super_del = super().__del__ # type:ignore[misc] except AttributeError: pass else: super_del() def list_kernel_ids(self) -> list[str]: """Return a list of the kernel ids of the active kernels.""" if self.external_connection_dir is not None: external_connection_dir = Path(self.external_connection_dir) if external_connection_dir.is_dir(): connection_files = [p for p in external_connection_dir.iterdir() if p.is_file()] # remove kernels (whose connection file has disappeared) from our list k = list(self.kernel_id_to_connection_file.keys()) v = list(self.kernel_id_to_connection_file.values()) for connection_file in list(self.kernel_id_to_connection_file.values()): if connection_file not in connection_files: kernel_id = k[v.index(connection_file)] del self.kernel_id_to_connection_file[kernel_id] del self._kernels[kernel_id] # add kernels (whose connection file appeared) to our list for connection_file in connection_files: if connection_file in self.kernel_id_to_connection_file.values(): continue try: connection_info: KernelConnectionInfo = json.loads( connection_file.read_text() ) except Exception: # noqa: S112 continue self.log.debug("Loading connection file %s", connection_file) if not ("kernel_name" in connection_info and "key" in connection_info): continue # it looks like a connection file kernel_id = self.new_kernel_id() self.kernel_id_to_connection_file[kernel_id] = connection_file km = self.kernel_manager_factory( parent=self, log=self.log, owns_kernel=False, ) km.load_connection_info(connection_info) km.last_activity = utcnow() km.execution_state = "idle" km.connections = 1 km.kernel_id = kernel_id km.kernel_name = connection_info["kernel_name"] km.ready.set_result(None) self._kernels[kernel_id] = km # Create a copy so we can iterate over kernels in operations # that delete keys. return list(self._kernels.keys()) def __len__(self) -> int: """Return the number of running kernels.""" return len(self.list_kernel_ids()) def __contains__(self, kernel_id: str) -> bool: return kernel_id in self._kernels def pre_start_kernel( self, kernel_name: str | None, kwargs: t.Any ) -> tuple[KernelManager, str, str]: # kwargs should be mutable, passing it as a dict argument. kernel_id = kwargs.pop("kernel_id", self.new_kernel_id(**kwargs)) if kernel_id in self: raise DuplicateKernelError("Kernel already exists: %s" % kernel_id) if kernel_name is None: kernel_name = self.default_kernel_name # kernel_manager_factory is the constructor for the KernelManager # subclass we are using. It can be configured as any Configurable, # including things like its transport and ip. constructor_kwargs = {} if self.kernel_spec_manager: constructor_kwargs["kernel_spec_manager"] = self.kernel_spec_manager km = self.kernel_manager_factory( connection_file=os.path.join(self.connection_dir, "kernel-%s.json" % kernel_id), parent=self, log=self.log, kernel_name=kernel_name, **constructor_kwargs, ) return km, kernel_name, kernel_id def update_env(self, *, kernel_id: str, env: t.Dict[str, str]) -> None: """ Allow to update the environment of the given kernel. Forward the update env request to the corresponding kernel. .. version-added: 8.5 """ if kernel_id in self: self._kernels[kernel_id].update_env(env=env) async def _add_kernel_when_ready( self, kernel_id: str, km: KernelManager, kernel_awaitable: t.Awaitable ) -> None: try: await kernel_awaitable self._kernels[kernel_id] = km self._pending_kernels.pop(kernel_id, None) except Exception as e: self.log.exception(e) async def _remove_kernel_when_ready( self, kernel_id: str, kernel_awaitable: t.Awaitable ) -> None: try: await kernel_awaitable self.remove_kernel(kernel_id) self._pending_kernels.pop(kernel_id, None) except Exception as e: self.log.exception(e) def _using_pending_kernels(self) -> bool: """Returns a boolean; a clearer method for determining if this multikernelmanager is using pending kernels or not """ return getattr(self, "use_pending_kernels", False) async def _async_start_kernel(self, *, kernel_name: str | None = None, **kwargs: t.Any) -> str: """Start a new kernel. The caller can pick a kernel_id by passing one in as a keyword arg, otherwise one will be generated using new_kernel_id(). The kernel ID for the newly started kernel is returned. """ km, kernel_name, kernel_id = self.pre_start_kernel(kernel_name, kwargs) if not isinstance(km, KernelManager): self.log.warning( # type:ignore[unreachable] "Kernel manager class ({km_class}) is not an instance of 'KernelManager'!".format( km_class=self.kernel_manager_class.__class__ ) ) kwargs["kernel_id"] = kernel_id # Make kernel_id available to manager and provisioner starter = ensure_async(km.start_kernel(**kwargs)) task = asyncio.create_task(self._add_kernel_when_ready(kernel_id, km, starter)) self._pending_kernels[kernel_id] = task # Handling a Pending Kernel if self._using_pending_kernels(): # If using pending kernels, do not block # on the kernel start. self._kernels[kernel_id] = km else: await task # raise an exception if one occurred during kernel startup. if km.ready.exception(): raise km.ready.exception() # type: ignore[misc] return kernel_id start_kernel = run_sync(_async_start_kernel) async def _async_shutdown_kernel( self, kernel_id: str, now: bool | None = False, restart: bool | None = False, ) -> None: """Shutdown a kernel by its kernel uuid. Parameters ========== kernel_id : uuid The id of the kernel to shutdown. now : bool Should the kernel be shutdown forcibly using a signal. restart : bool Will the kernel be restarted? """ self.log.info("Kernel shutdown: %s", kernel_id) # If the kernel is still starting, wait for it to be ready. if kernel_id in self._pending_kernels: task = self._pending_kernels[kernel_id] try: await task km = self.get_kernel(kernel_id) await t.cast(asyncio.Future, km.ready) except asyncio.CancelledError: pass except Exception: self.remove_kernel(kernel_id) return km = self.get_kernel(kernel_id) # If a pending kernel raised an exception, remove it. if not km.ready.cancelled() and km.ready.exception(): self.remove_kernel(kernel_id) return stopper = ensure_async(km.shutdown_kernel(now, restart)) fut = asyncio.ensure_future(self._remove_kernel_when_ready(kernel_id, stopper)) self._pending_kernels[kernel_id] = fut # Await the kernel if not using pending kernels. if not self._using_pending_kernels(): await fut # raise an exception if one occurred during kernel shutdown. if km.ready.exception(): raise km.ready.exception() # type: ignore[misc] shutdown_kernel = run_sync(_async_shutdown_kernel) @kernel_method def request_shutdown(self, kernel_id: str, restart: bool | None = False) -> None: """Ask a kernel to shut down by its kernel uuid""" @kernel_method def finish_shutdown( self, kernel_id: str, waittime: float | None = None, pollinterval: float | None = 0.1, ) -> None: """Wait for a kernel to finish shutting down, and kill it if it doesn't""" self.log.info("Kernel shutdown: %s", kernel_id) @kernel_method def cleanup_resources(self, kernel_id: str, restart: bool = False) -> None: """Clean up a kernel's resources""" def remove_kernel(self, kernel_id: str) -> KernelManager: """remove a kernel from our mapping. Mainly so that a kernel can be removed if it is already dead, without having to call shutdown_kernel. The kernel object is returned, or `None` if not found. """ return self._kernels.pop(kernel_id, None) async def _async_shutdown_all(self, now: bool = False) -> None: """Shutdown all kernels.""" kids = self.list_kernel_ids() kids += list(self._pending_kernels) kms = list(self._kernels.values()) futs = [self._async_shutdown_kernel(kid, now=now) for kid in set(kids)] await asyncio.gather(*futs) # If using pending kernels, the kernels will not have been fully shut down. if self._using_pending_kernels(): for km in kms: try: await km.ready except asyncio.CancelledError: self._pending_kernels[km.kernel_id].cancel() except Exception: # Will have been logged in _add_kernel_when_ready pass shutdown_all = run_sync(_async_shutdown_all) def interrupt_kernel(self, kernel_id: str) -> None: """Interrupt (SIGINT) the kernel by its uuid. Parameters ========== kernel_id : uuid The id of the kernel to interrupt. """ kernel = self.get_kernel(kernel_id) if not kernel.ready.done(): msg = "Kernel is in a pending state. Cannot interrupt." raise RuntimeError(msg) out = kernel.interrupt_kernel() self.log.info("Kernel interrupted: %s", kernel_id) return out @kernel_method def signal_kernel(self, kernel_id: str, signum: int) -> None: """Sends a signal to the kernel by its uuid. Note that since only SIGTERM is supported on Windows, this function is only useful on Unix systems. Parameters ========== kernel_id : uuid The id of the kernel to signal. signum : int Signal number to send kernel. """ self.log.info("Signaled Kernel %s with %s", kernel_id, signum) async def _async_restart_kernel(self, kernel_id: str, now: bool = False) -> None: """Restart a kernel by its uuid, keeping the same ports. Parameters ========== kernel_id : uuid The id of the kernel to interrupt. now : bool, optional If True, the kernel is forcefully restarted *immediately*, without having a chance to do any cleanup action. Otherwise the kernel is given 1s to clean up before a forceful restart is issued. In all cases the kernel is restarted, the only difference is whether it is given a chance to perform a clean shutdown or not. """ kernel = self.get_kernel(kernel_id) if self._using_pending_kernels() and not kernel.ready.done(): msg = "Kernel is in a pending state. Cannot restart." raise RuntimeError(msg) await ensure_async(kernel.restart_kernel(now=now)) self.log.info("Kernel restarted: %s", kernel_id) restart_kernel = run_sync(_async_restart_kernel) @kernel_method def is_alive(self, kernel_id: str) -> bool: # type:ignore[empty-body] """Is the kernel alive. This calls KernelManager.is_alive() which calls Popen.poll on the actual kernel subprocess. Parameters ========== kernel_id : uuid The id of the kernel. """ def _check_kernel_id(self, kernel_id: str) -> None: """check that a kernel id is valid""" if kernel_id not in self: raise KeyError("Kernel with id not found: %s" % kernel_id) def get_kernel(self, kernel_id: str) -> KernelManager: """Get the single KernelManager object for a kernel by its uuid. Parameters ========== kernel_id : uuid The id of the kernel. """ self._check_kernel_id(kernel_id) return self._kernels[kernel_id] @kernel_method def add_restart_callback( self, kernel_id: str, callback: t.Callable, event: str = "restart" ) -> None: """add a callback for the KernelRestarter""" @kernel_method def remove_restart_callback( self, kernel_id: str, callback: t.Callable, event: str = "restart" ) -> None: """remove a callback for the KernelRestarter""" @kernel_method def get_connection_info(self, kernel_id: str) -> dict[str, t.Any]: # type:ignore[empty-body] """Return a dictionary of connection data for a kernel. Parameters ========== kernel_id : uuid The id of the kernel. Returns ======= connection_dict : dict A dict of the information needed to connect to a kernel. This includes the ip address and the integer port numbers of the different channels (stdin_port, iopub_port, shell_port, hb_port). """ @kernel_method def connect_iopub( # type:ignore[empty-body] self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the iopub channel. Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """ @kernel_method def connect_shell( # type:ignore[empty-body] self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the shell channel. Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """ @kernel_method def connect_control( # type:ignore[empty-body] self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the control channel. Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """ @kernel_method def connect_stdin( # type:ignore[empty-body] self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the stdin channel. Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """ @kernel_method def connect_hb( # type:ignore[empty-body] self, kernel_id: str, identity: bytes | None = None ) -> socket.socket: """Return a zmq Socket connected to the hb channel. Parameters ========== kernel_id : uuid The id of the kernel identity : bytes (optional) The zmq identity of the socket Returns ======= stream : zmq Socket or ZMQStream """ def new_kernel_id(self, **kwargs: t.Any) -> str: """ Returns the id to associate with the kernel for this request. Subclasses may override this method to substitute other sources of kernel ids. :param kwargs: :return: string-ized version 4 uuid """ return str(uuid.uuid4()) class AsyncMultiKernelManager(MultiKernelManager): kernel_manager_class = DottedObjectName( "jupyter_client.ioloop.AsyncIOLoopKernelManager", config=True, help="""The kernel manager class. This is configurable to allow subclassing of the AsyncKernelManager for customized behavior. """, ) use_pending_kernels = Bool( False, help="""Whether to make kernels available before the process has started. The kernel has a `.ready` future which can be awaited before connecting""", ).tag(config=True) context = Instance("zmq.asyncio.Context") @default("context") def _context_default(self) -> zmq.asyncio.Context: self._created_context = True return zmq.asyncio.Context() start_kernel: t.Callable[..., t.Awaitable] = MultiKernelManager._async_start_kernel # type:ignore[assignment] restart_kernel: t.Callable[..., t.Awaitable] = MultiKernelManager._async_restart_kernel # type:ignore[assignment] shutdown_kernel: t.Callable[..., t.Awaitable] = MultiKernelManager._async_shutdown_kernel # type:ignore[assignment] shutdown_all: t.Callable[..., t.Awaitable] = MultiKernelManager._async_shutdown_all # type:ignore[assignment] jupyter_client-8.6.2/jupyter_client/provisioning/000077500000000000000000000000001462351563100223635ustar00rootroot00000000000000jupyter_client-8.6.2/jupyter_client/provisioning/__init__.py000066400000000000000000000002521462351563100244730ustar00rootroot00000000000000from .factory import KernelProvisionerFactory # noqa from .local_provisioner import LocalProvisioner # noqa from .provisioner_base import KernelProvisionerBase # noqa jupyter_client-8.6.2/jupyter_client/provisioning/factory.py000066400000000000000000000226631462351563100244150ustar00rootroot00000000000000"""Kernel Provisioner Classes""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import glob import sys from os import getenv, path from typing import Any, Dict, List # See compatibility note on `group` keyword in https://docs.python.org/3/library/importlib.metadata.html#entry-points if sys.version_info < (3, 10): # pragma: no cover from importlib_metadata import EntryPoint, entry_points # type:ignore[import-not-found] else: # pragma: no cover from importlib.metadata import EntryPoint, entry_points from traitlets.config import SingletonConfigurable, Unicode, default from .provisioner_base import KernelProvisionerBase class KernelProvisionerFactory(SingletonConfigurable): """ :class:`KernelProvisionerFactory` is responsible for creating provisioner instances. A singleton instance, `KernelProvisionerFactory` is also used by the :class:`KernelSpecManager` to validate `kernel_provisioner` references found in kernel specifications to confirm their availability (in cases where the kernel specification references a kernel provisioner that has not been installed into the current Python environment). It's ``default_provisioner_name`` attribute can be used to specify the default provisioner to use when a kernel_spec is found to not reference a provisioner. It's value defaults to `"local-provisioner"` which identifies the local provisioner implemented by :class:`LocalProvisioner`. """ GROUP_NAME = "jupyter_client.kernel_provisioners" provisioners: Dict[str, EntryPoint] = {} default_provisioner_name_env = "JUPYTER_DEFAULT_PROVISIONER_NAME" default_provisioner_name = Unicode( config=True, help="""Indicates the name of the provisioner to use when no kernel_provisioner entry is present in the kernelspec.""", ) @default("default_provisioner_name") def _default_provisioner_name_default(self) -> str: """The default provisioner name.""" return getenv(self.default_provisioner_name_env, "local-provisioner") def __init__(self, **kwargs: Any) -> None: """Initialize a kernel provisioner factory.""" super().__init__(**kwargs) for ep in KernelProvisionerFactory._get_all_provisioners(): self.provisioners[ep.name] = ep def is_provisioner_available(self, kernel_spec: Any) -> bool: """ Reads the associated ``kernel_spec`` to determine the provisioner and returns whether it exists as an entry_point (True) or not (False). If the referenced provisioner is not in the current cache or cannot be loaded via entry_points, a warning message is issued indicating it is not available. """ is_available: bool = True provisioner_cfg = self._get_provisioner_config(kernel_spec) provisioner_name = str(provisioner_cfg.get("provisioner_name")) if not self._check_availability(provisioner_name): is_available = False self.log.warning( f"Kernel '{kernel_spec.display_name}' is referencing a kernel " f"provisioner ('{provisioner_name}') that is not available. " f"Ensure the appropriate package has been installed and retry." ) return is_available def create_provisioner_instance( self, kernel_id: str, kernel_spec: Any, parent: Any ) -> KernelProvisionerBase: """ Reads the associated ``kernel_spec`` to see if it has a `kernel_provisioner` stanza. If one exists, it instantiates an instance. If a kernel provisioner is not specified in the kernel specification, a default provisioner stanza is fabricated and instantiated corresponding to the current value of ``default_provisioner_name`` trait. The instantiated instance is returned. If the provisioner is found to not exist (not registered via entry_points), `ModuleNotFoundError` is raised. """ provisioner_cfg = self._get_provisioner_config(kernel_spec) provisioner_name = str(provisioner_cfg.get("provisioner_name")) if not self._check_availability(provisioner_name): msg = f"Kernel provisioner '{provisioner_name}' has not been registered." raise ModuleNotFoundError(msg) self.log.debug( f"Instantiating kernel '{kernel_spec.display_name}' with " f"kernel provisioner: {provisioner_name}" ) provisioner_class = self.provisioners[provisioner_name].load() provisioner_config = provisioner_cfg.get("config") provisioner: KernelProvisionerBase = provisioner_class( kernel_id=kernel_id, kernel_spec=kernel_spec, parent=parent, **provisioner_config ) return provisioner def _check_availability(self, provisioner_name: str) -> bool: """ Checks that the given provisioner is available. If the given provisioner is not in the current set of loaded provisioners an attempt is made to fetch the named entry point and, if successful, loads it into the cache. :param provisioner_name: :return: """ is_available = True if provisioner_name not in self.provisioners: try: ep = self._get_provisioner(provisioner_name) self.provisioners[provisioner_name] = ep # Update cache except Exception: is_available = False return is_available def _get_provisioner_config(self, kernel_spec: Any) -> Dict[str, Any]: """ Return the kernel_provisioner stanza from the kernel_spec. Checks the kernel_spec's metadata dictionary for a kernel_provisioner entry. If found, it is returned, else one is created relative to the DEFAULT_PROVISIONER and returned. Parameters ---------- kernel_spec : Any - this is a KernelSpec type but listed as Any to avoid circular import The kernel specification object from which the provisioner dictionary is derived. Returns ------- dict The provisioner portion of the kernel_spec. If one does not exist, it will contain the default information. If no `config` sub-dictionary exists, an empty `config` dictionary will be added. """ env_provisioner = kernel_spec.metadata.get("kernel_provisioner", {}) if "provisioner_name" in env_provisioner: # If no provisioner_name, return default if ( "config" not in env_provisioner ): # if provisioner_name, but no config stanza, add one env_provisioner.update({"config": {}}) return env_provisioner # Return what we found (plus config stanza if necessary) return {"provisioner_name": self.default_provisioner_name, "config": {}} def get_provisioner_entries(self) -> Dict[str, str]: """ Returns a dictionary of provisioner entries. The key is the provisioner name for its entry point. The value is the colon-separated string of the entry point's module name and object name. """ entries = {} for name, ep in self.provisioners.items(): entries[name] = ep.value return entries @staticmethod def _get_all_provisioners() -> List[EntryPoint]: """Wrapper around entry_points (to fetch the set of provisioners) - primarily to facilitate testing.""" return entry_points(group=KernelProvisionerFactory.GROUP_NAME) def _get_provisioner(self, name: str) -> EntryPoint: """Wrapper around entry_points (to fetch a single provisioner) - primarily to facilitate testing.""" eps = entry_points(group=KernelProvisionerFactory.GROUP_NAME, name=name) if eps: return eps[0] # Check if the entrypoint name is 'local-provisioner'. Although this should never # happen, we have seen cases where the previous distribution of jupyter_client has # remained which doesn't include kernel-provisioner entrypoints (so 'local-provisioner' # is deemed not found even though its definition is in THIS package). In such cases, # the entrypoints package uses what it first finds - which is the older distribution # resulting in a violation of a supposed invariant condition. To address this scenario, # we will log a warning message indicating this situation, then build the entrypoint # instance ourselves - since we have that information. if name == "local-provisioner": distros = glob.glob(f"{path.dirname(path.dirname(__file__))}-*") self.log.warning( f"Kernel Provisioning: The 'local-provisioner' is not found. This is likely " f"due to the presence of multiple jupyter_client distributions and a previous " f"distribution is being used as the source for entrypoints - which does not " f"include 'local-provisioner'. That distribution should be removed such that " f"only the version-appropriate distribution remains (version >= 7). Until " f"then, a 'local-provisioner' entrypoint will be automatically constructed " f"and used.\nThe candidate distribution locations are: {distros}" ) return EntryPoint( "local-provisioner", "jupyter_client.provisioning", "LocalProvisioner" ) raise jupyter_client-8.6.2/jupyter_client/provisioning/local_provisioner.py000066400000000000000000000235071462351563100264750ustar00rootroot00000000000000"""Kernel Provisioner Classes""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import asyncio import os import signal import sys from typing import TYPE_CHECKING, Any, Dict, List, Optional from ..connect import KernelConnectionInfo, LocalPortCache from ..launcher import launch_kernel from ..localinterfaces import is_local_ip, local_ips from .provisioner_base import KernelProvisionerBase class LocalProvisioner(KernelProvisionerBase): # type:ignore[misc] """ :class:`LocalProvisioner` is a concrete class of ABC :py:class:`KernelProvisionerBase` and is the out-of-box default implementation used when no kernel provisioner is specified in the kernel specification (``kernel.json``). It provides functional parity to existing applications by launching the kernel locally and using :class:`subprocess.Popen` to manage its lifecycle. This class is intended to be subclassed for customizing local kernel environments and serve as a reference implementation for other custom provisioners. """ process = None _exit_future = None pid = None pgid = None ip = None ports_cached = False @property def has_process(self) -> bool: return self.process is not None async def poll(self) -> Optional[int]: """Poll the provisioner.""" ret = 0 if self.process: ret = self.process.poll() # type:ignore[unreachable] return ret async def wait(self) -> Optional[int]: """Wait for the provisioner process.""" ret = 0 if self.process: # Use busy loop at 100ms intervals, polling until the process is # not alive. If we find the process is no longer alive, complete # its cleanup via the blocking wait(). Callers are responsible for # issuing calls to wait() using a timeout (see kill()). while await self.poll() is None: # type:ignore[unreachable] await asyncio.sleep(0.1) # Process is no longer alive, wait and clear ret = self.process.wait() # Make sure all the fds get closed. for attr in ["stdout", "stderr", "stdin"]: fid = getattr(self.process, attr) if fid: fid.close() self.process = None # allow has_process to now return False return ret async def send_signal(self, signum: int) -> None: """Sends a signal to the process group of the kernel (this usually includes the kernel and any subprocesses spawned by the kernel). Note that since only SIGTERM is supported on Windows, we will check if the desired signal is for interrupt and apply the applicable code on Windows in that case. """ if self.process: if signum == signal.SIGINT and sys.platform == "win32": # type:ignore[unreachable] from ..win_interrupt import send_interrupt send_interrupt(self.process.win32_interrupt_event) return # Prefer process-group over process if self.pgid and hasattr(os, "killpg"): try: os.killpg(self.pgid, signum) return except OSError: pass # We'll retry sending the signal to only the process below # If we're here, send the signal to the process and let caller handle exceptions self.process.send_signal(signum) return async def kill(self, restart: bool = False) -> None: """Kill the provisioner and optionally restart.""" if self.process: if hasattr(signal, "SIGKILL"): # type:ignore[unreachable] # If available, give preference to signalling the process-group over `kill()`. try: await self.send_signal(signal.SIGKILL) return except OSError: pass try: self.process.kill() except OSError as e: LocalProvisioner._tolerate_no_process(e) async def terminate(self, restart: bool = False) -> None: """Terminate the provisioner and optionally restart.""" if self.process: if hasattr(signal, "SIGTERM"): # type:ignore[unreachable] # If available, give preference to signalling the process group over `terminate()`. try: await self.send_signal(signal.SIGTERM) return except OSError: pass try: self.process.terminate() except OSError as e: LocalProvisioner._tolerate_no_process(e) @staticmethod def _tolerate_no_process(os_error: OSError) -> None: # In Windows, we will get an Access Denied error if the process # has already terminated. Ignore it. if sys.platform == "win32": if os_error.winerror != 5: raise # On Unix, we may get an ESRCH error (or ProcessLookupError instance) if # the process has already terminated. Ignore it. else: from errno import ESRCH if not isinstance(os_error, ProcessLookupError) or os_error.errno != ESRCH: raise async def cleanup(self, restart: bool = False) -> None: """Clean up the resources used by the provisioner and optionally restart.""" if self.ports_cached and not restart: # provisioner is about to be destroyed, return cached ports lpc = LocalPortCache.instance() ports = ( self.connection_info["shell_port"], self.connection_info["iopub_port"], self.connection_info["stdin_port"], self.connection_info["hb_port"], self.connection_info["control_port"], ) for port in ports: if TYPE_CHECKING: assert isinstance(port, int) lpc.return_port(port) async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: """Perform any steps in preparation for kernel process launch. This includes applying additional substitutions to the kernel launch command and env. It also includes preparation of launch parameters. Returns the updated kwargs. """ # This should be considered temporary until a better division of labor can be defined. km = self.parent if km: if km.transport == "tcp" and not is_local_ip(km.ip): msg = ( "Can only launch a kernel on a local interface. " f"This one is not: {km.ip}." "Make sure that the '*_address' attributes are " "configured properly. " f"Currently valid addresses are: {local_ips()}" ) raise RuntimeError(msg) # build the Popen cmd extra_arguments = kwargs.pop("extra_arguments", []) # write connection file / get default ports # TODO - change when handshake pattern is adopted if km.cache_ports and not self.ports_cached: lpc = LocalPortCache.instance() km.shell_port = lpc.find_available_port(km.ip) km.iopub_port = lpc.find_available_port(km.ip) km.stdin_port = lpc.find_available_port(km.ip) km.hb_port = lpc.find_available_port(km.ip) km.control_port = lpc.find_available_port(km.ip) self.ports_cached = True if "env" in kwargs: jupyter_session = kwargs["env"].get("JPY_SESSION_NAME", "") km.write_connection_file(jupyter_session=jupyter_session) else: km.write_connection_file() self.connection_info = km.get_connection_info() kernel_cmd = km.format_kernel_cmd( extra_arguments=extra_arguments ) # This needs to remain here for b/c else: extra_arguments = kwargs.pop("extra_arguments", []) kernel_cmd = self.kernel_spec.argv + extra_arguments return await super().pre_launch(cmd=kernel_cmd, **kwargs) async def launch_kernel(self, cmd: List[str], **kwargs: Any) -> KernelConnectionInfo: """Launch a kernel with a command.""" scrubbed_kwargs = LocalProvisioner._scrub_kwargs(kwargs) self.process = launch_kernel(cmd, **scrubbed_kwargs) pgid = None if hasattr(os, "getpgid"): try: pgid = os.getpgid(self.process.pid) except OSError: pass self.pid = self.process.pid self.pgid = pgid return self.connection_info @staticmethod def _scrub_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]: """Remove any keyword arguments that Popen does not tolerate.""" keywords_to_scrub: List[str] = ["extra_arguments", "kernel_id"] scrubbed_kwargs = kwargs.copy() for kw in keywords_to_scrub: scrubbed_kwargs.pop(kw, None) return scrubbed_kwargs async def get_provisioner_info(self) -> Dict: """Captures the base information necessary for persistence relative to this instance.""" provisioner_info = await super().get_provisioner_info() provisioner_info.update({"pid": self.pid, "pgid": self.pgid, "ip": self.ip}) return provisioner_info async def load_provisioner_info(self, provisioner_info: Dict) -> None: """Loads the base information necessary for persistence relative to this instance.""" await super().load_provisioner_info(provisioner_info) self.pid = provisioner_info["pid"] self.pgid = provisioner_info["pgid"] self.ip = provisioner_info["ip"] jupyter_client-8.6.2/jupyter_client/provisioning/provisioner_base.py000066400000000000000000000233621462351563100263140ustar00rootroot00000000000000"""Kernel Provisioner Classes""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os from abc import ABC, ABCMeta, abstractmethod from typing import Any, Dict, List, Optional, Union from traitlets.config import Instance, LoggingConfigurable, Unicode from ..connect import KernelConnectionInfo class KernelProvisionerMeta(ABCMeta, type(LoggingConfigurable)): # type: ignore[misc] pass class KernelProvisionerBase( # type:ignore[misc] ABC, LoggingConfigurable, metaclass=KernelProvisionerMeta ): """ Abstract base class defining methods for KernelProvisioner classes. A majority of methods are abstract (requiring implementations via a subclass) while some are optional and others provide implementations common to all instances. Subclasses should be aware of which methods require a call to the superclass. Many of these methods model those of :class:`subprocess.Popen` for parity with previous versions where the kernel process was managed directly. """ # The kernel specification associated with this provisioner kernel_spec: Any = Instance("jupyter_client.kernelspec.KernelSpec", allow_none=True) kernel_id: Union[str, Unicode] = Unicode(None, allow_none=True) connection_info: KernelConnectionInfo = {} @property @abstractmethod def has_process(self) -> bool: """ Returns true if this provisioner is currently managing a process. This property is asserted to be True immediately following a call to the provisioner's :meth:`launch_kernel` method. """ pass @abstractmethod async def poll(self) -> Optional[int]: """ Checks if kernel process is still running. If running, None is returned, otherwise the process's integer-valued exit code is returned. This method is called from :meth:`KernelManager.is_alive`. """ pass @abstractmethod async def wait(self) -> Optional[int]: """ Waits for kernel process to terminate. This method is called from `KernelManager.finish_shutdown()` and `KernelManager.kill_kernel()` when terminating a kernel gracefully or immediately, respectively. """ pass @abstractmethod async def send_signal(self, signum: int) -> None: """ Sends signal identified by signum to the kernel process. This method is called from `KernelManager.signal_kernel()` to send the kernel process a signal. """ pass @abstractmethod async def kill(self, restart: bool = False) -> None: """ Kill the kernel process. This is typically accomplished via a SIGKILL signal, which cannot be caught. This method is called from `KernelManager.kill_kernel()` when terminating a kernel immediately. restart is True if this operation will precede a subsequent launch_kernel request. """ pass @abstractmethod async def terminate(self, restart: bool = False) -> None: """ Terminates the kernel process. This is typically accomplished via a SIGTERM signal, which can be caught, allowing the kernel provisioner to perform possible cleanup of resources. This method is called indirectly from `KernelManager.finish_shutdown()` during a kernel's graceful termination. restart is True if this operation precedes a start launch_kernel request. """ pass @abstractmethod async def launch_kernel(self, cmd: List[str], **kwargs: Any) -> KernelConnectionInfo: """ Launch the kernel process and return its connection information. This method is called from `KernelManager.launch_kernel()` during the kernel manager's start kernel sequence. """ pass @abstractmethod async def cleanup(self, restart: bool = False) -> None: """ Cleanup any resources allocated on behalf of the kernel provisioner. This method is called from `KernelManager.cleanup_resources()` as part of its shutdown kernel sequence. restart is True if this operation precedes a start launch_kernel request. """ pass async def shutdown_requested(self, restart: bool = False) -> None: """ Allows the provisioner to determine if the kernel's shutdown has been requested. This method is called from `KernelManager.request_shutdown()` as part of its shutdown sequence. This method is optional and is primarily used in scenarios where the provisioner may need to perform other operations in preparation for a kernel's shutdown. """ pass async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: """ Perform any steps in preparation for kernel process launch. This includes applying additional substitutions to the kernel launch command and environment. It also includes preparation of launch parameters. NOTE: Subclass implementations are advised to call this method as it applies environment variable substitutions from the local environment and calls the provisioner's :meth:`_finalize_env()` method to allow each provisioner the ability to cleanup the environment variables that will be used by the kernel. This method is called from `KernelManager.pre_start_kernel()` as part of its start kernel sequence. Returns the (potentially updated) keyword arguments that are passed to :meth:`launch_kernel()`. """ env = kwargs.pop("env", os.environ).copy() env.update(self.__apply_env_substitutions(env)) self._finalize_env(env) kwargs["env"] = env return kwargs async def post_launch(self, **kwargs: Any) -> None: """ Perform any steps following the kernel process launch. This method is called from `KernelManager.post_start_kernel()` as part of its start kernel sequence. """ pass async def get_provisioner_info(self) -> Dict[str, Any]: """ Captures the base information necessary for persistence relative to this instance. This enables applications that subclass `KernelManager` to persist a kernel provisioner's relevant information to accomplish functionality like disaster recovery or high availability by calling this method via the kernel manager's `provisioner` attribute. NOTE: The superclass method must always be called first to ensure proper serialization. """ provisioner_info: Dict[str, Any] = {} provisioner_info["kernel_id"] = self.kernel_id provisioner_info["connection_info"] = self.connection_info return provisioner_info async def load_provisioner_info(self, provisioner_info: Dict) -> None: """ Loads the base information necessary for persistence relative to this instance. The inverse of `get_provisioner_info()`, this enables applications that subclass `KernelManager` to re-establish communication with a provisioner that is managing a (presumably) remote kernel from an entirely different process that the original provisioner. NOTE: The superclass method must always be called first to ensure proper deserialization. """ self.kernel_id = provisioner_info["kernel_id"] self.connection_info = provisioner_info["connection_info"] def get_shutdown_wait_time(self, recommended: float = 5.0) -> float: """ Returns the time allowed for a complete shutdown. This may vary by provisioner. This method is called from `KernelManager.finish_shutdown()` during the graceful phase of its kernel shutdown sequence. The recommended value will typically be what is configured in the kernel manager. """ return recommended def get_stable_start_time(self, recommended: float = 10.0) -> float: """ Returns the expected upper bound for a kernel (re-)start to complete. This may vary by provisioner. The recommended value will typically be what is configured in the kernel restarter. """ return recommended def _finalize_env(self, env: Dict[str, str]) -> None: """ Ensures env is appropriate prior to launch. This method is called from `KernelProvisionerBase.pre_launch()` during the kernel's start sequence. NOTE: Subclasses should be sure to call super()._finalize_env(env) """ if self.kernel_spec.language and self.kernel_spec.language.lower().startswith("python"): # Don't allow PYTHONEXECUTABLE to be passed to kernel process. # If set, it can bork all the things. env.pop("PYTHONEXECUTABLE", None) def __apply_env_substitutions(self, substitution_values: Dict[str, str]) -> Dict[str, str]: """ Walks entries in the kernelspec's env stanza and applies substitutions from current env. This method is called from `KernelProvisionerBase.pre_launch()` during the kernel's start sequence. Returns the substituted list of env entries. NOTE: This method is private and is not intended to be overridden by provisioners. """ substituted_env = {} if self.kernel_spec: from string import Template # For each templated env entry, fill any templated references # matching names of env variables with those values and build # new dict with substitutions. templated_env = self.kernel_spec.env for k, v in templated_env.items(): substituted_env.update({k: Template(v).safe_substitute(substitution_values)}) return substituted_env jupyter_client-8.6.2/jupyter_client/py.typed000066400000000000000000000000001462351563100213220ustar00rootroot00000000000000jupyter_client-8.6.2/jupyter_client/restarter.py000066400000000000000000000133341462351563100222260ustar00rootroot00000000000000"""A basic kernel monitor with autorestarting. This watches a kernel's state using KernelManager.is_alive and auto restarts the kernel if it dies. It is an incomplete base class, and must be subclassed. """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import time import typing as t from traitlets import Bool, Dict, Float, Instance, Integer, default from traitlets.config.configurable import LoggingConfigurable class KernelRestarter(LoggingConfigurable): """Monitor and autorestart a kernel.""" kernel_manager = Instance("jupyter_client.KernelManager") debug = Bool( False, config=True, help="""Whether to include every poll event in debugging output. Has to be set explicitly, because there will be *a lot* of output. """, ) time_to_dead = Float(3.0, config=True, help="""Kernel heartbeat interval in seconds.""") stable_start_time = Float( 10.0, config=True, help="""The time in seconds to consider the kernel to have completed a stable start up.""", ) restart_limit = Integer( 5, config=True, help="""The number of consecutive autorestarts before the kernel is presumed dead.""", ) random_ports_until_alive = Bool( True, config=True, help="""Whether to choose new random ports when restarting before the kernel is alive.""", ) _restarting = Bool(False) _restart_count = Integer(0) _initial_startup = Bool(True) _last_dead = Float() @default("_last_dead") def _default_last_dead(self) -> float: return time.time() callbacks = Dict() def _callbacks_default(self) -> dict[str, list]: return {"restart": [], "dead": []} def start(self) -> None: """Start the polling of the kernel.""" msg = "Must be implemented in a subclass" raise NotImplementedError(msg) def stop(self) -> None: """Stop the kernel polling.""" msg = "Must be implemented in a subclass" raise NotImplementedError(msg) def add_callback(self, f: t.Callable[..., t.Any], event: str = "restart") -> None: """register a callback to fire on a particular event Possible values for event: 'restart' (default): kernel has died, and will be restarted. 'dead': restart has failed, kernel will be left dead. """ self.callbacks[event].append(f) def remove_callback(self, f: t.Callable[..., t.Any], event: str = "restart") -> None: """unregister a callback to fire on a particular event Possible values for event: 'restart' (default): kernel has died, and will be restarted. 'dead': restart has failed, kernel will be left dead. """ try: self.callbacks[event].remove(f) except ValueError: pass def _fire_callbacks(self, event: t.Any) -> None: """fire our callbacks for a particular event""" for callback in self.callbacks[event]: try: callback() except Exception: self.log.error( "KernelRestarter: %s callback %r failed", event, callback, exc_info=True, ) def poll(self) -> None: if self.debug: self.log.debug("Polling kernel...") if self.kernel_manager.shutting_down: self.log.debug("Kernel shutdown in progress...") return now = time.time() if not self.kernel_manager.is_alive(): self._last_dead = now if self._restarting: self._restart_count += 1 else: self._restart_count = 1 if self._restart_count > self.restart_limit: self.log.warning("KernelRestarter: restart failed") self._fire_callbacks("dead") self._restarting = False self._restart_count = 0 self.stop() else: newports = self.random_ports_until_alive and self._initial_startup self.log.info( "KernelRestarter: restarting kernel (%i/%i), %s random ports", self._restart_count, self.restart_limit, "new" if newports else "keep", ) self._fire_callbacks("restart") self.kernel_manager.restart_kernel(now=True, newports=newports) self._restarting = True else: # Since `is_alive` only tests that the kernel process is alive, it does not # indicate that the kernel has successfully completed startup. To solve this # correctly, we would need to wait for a kernel info reply, but it is not # necessarily appropriate to start a kernel client + channels in the # restarter. Therefore, we use "has been alive continuously for X time" as a # heuristic for a stable start up. # See https://github.com/jupyter/jupyter_client/pull/717 for details. stable_start_time = self.stable_start_time if self.kernel_manager.provisioner: stable_start_time = self.kernel_manager.provisioner.get_stable_start_time( recommended=stable_start_time ) if self._initial_startup and now - self._last_dead >= stable_start_time: self._initial_startup = False if self._restarting and now - self._last_dead >= stable_start_time: self.log.debug("KernelRestarter: restart apparently succeeded") self._restarting = False jupyter_client-8.6.2/jupyter_client/runapp.py000066400000000000000000000111141462351563100215120ustar00rootroot00000000000000"""A Jupyter console app to run files.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import queue import signal import sys import time import typing as t from jupyter_core.application import JupyterApp, base_aliases, base_flags from traitlets import Any, Dict, Float from traitlets.config import catch_config_error from . import __version__ from .consoleapp import JupyterConsoleApp, app_aliases, app_flags OUTPUT_TIMEOUT = 10 # copy flags from mixin: flags = dict(base_flags) # start with mixin frontend flags: frontend_flags_dict = dict(app_flags) # update full dict with frontend flags: flags.update(frontend_flags_dict) # copy flags from mixin aliases = dict(base_aliases) # start with mixin frontend flags frontend_aliases_dict = dict(app_aliases) # load updated frontend flags into full dict aliases.update(frontend_aliases_dict) # get flags&aliases into sets, and remove a couple that # shouldn't be scrubbed from backend flags: frontend_aliases = set(frontend_aliases_dict.keys()) frontend_flags = set(frontend_flags_dict.keys()) class RunApp(JupyterApp, JupyterConsoleApp): # type:ignore[misc] """An Jupyter Console app to run files.""" version = __version__ name = "jupyter run" description = """Run Jupyter kernel code.""" flags = Dict(flags) # type:ignore[assignment] aliases = Dict(aliases) # type:ignore[assignment] frontend_aliases = Any(frontend_aliases) frontend_flags = Any(frontend_flags) kernel_timeout = Float( 60, config=True, help="""Timeout for giving up on a kernel (in seconds). On first connect and restart, the console tests whether the kernel is running and responsive by sending kernel_info_requests. This sets the timeout in seconds for how long the kernel can take before being presumed dead. """, ) def parse_command_line(self, argv: list[str] | None = None) -> None: """Parse the command line arguments.""" super().parse_command_line(argv) self.build_kernel_argv(self.extra_args) self.filenames_to_run = self.extra_args[:] @catch_config_error def initialize(self, argv: list[str] | None = None) -> None: # type:ignore[override] """Initialize the app.""" self.log.debug("jupyter run: initialize...") super().initialize(argv) JupyterConsoleApp.initialize(self) signal.signal(signal.SIGINT, self.handle_sigint) self.init_kernel_info() def handle_sigint(self, *args: t.Any) -> None: """Handle SIGINT.""" if self.kernel_manager: self.kernel_manager.interrupt_kernel() else: self.log.error("Cannot interrupt kernels we didn't start.\n") def init_kernel_info(self) -> None: """Wait for a kernel to be ready, and store kernel info""" timeout = self.kernel_timeout tic = time.time() self.kernel_client.hb_channel.unpause() msg_id = self.kernel_client.kernel_info() while True: try: reply = self.kernel_client.get_shell_msg(timeout=1) except queue.Empty as e: if (time.time() - tic) > timeout: msg = "Kernel didn't respond to kernel_info_request" raise RuntimeError(msg) from e else: if reply["parent_header"].get("msg_id") == msg_id: self.kernel_info = reply["content"] return def start(self) -> None: """Start the application.""" self.log.debug("jupyter run: starting...") super().start() if self.filenames_to_run: for filename in self.filenames_to_run: self.log.debug("jupyter run: executing `%s`", filename) with open(filename) as fp: code = fp.read() reply = self.kernel_client.execute_interactive(code, timeout=OUTPUT_TIMEOUT) return_code = 0 if reply["content"]["status"] == "ok" else 1 if return_code: raise Exception("jupyter-run error running '%s'" % filename) else: code = sys.stdin.read() reply = self.kernel_client.execute_interactive(code, timeout=OUTPUT_TIMEOUT) return_code = 0 if reply["content"]["status"] == "ok" else 1 if return_code: msg = "jupyter-run error running 'stdin'" raise Exception(msg) main = launch_new_instance = RunApp.launch_instance if __name__ == "__main__": main() jupyter_client-8.6.2/jupyter_client/session.py000066400000000000000000001116161462351563100217000ustar00rootroot00000000000000"""Session object for building, serializing, sending, and receiving messages. The Session object supports serialization, HMAC signatures, and metadata on messages. Also defined here are utilities for working with Sessions: * A SessionFactory to be used as a base class for configurables that work with Sessions. * A Message object for convenience that allows attribute-access to the msg dict. """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import hashlib import hmac import json import logging import os import pickle import pprint import random import typing as t import warnings from binascii import b2a_hex from datetime import datetime, timezone from hmac import compare_digest # We are using compare_digest to limit the surface of timing attacks import zmq.asyncio from tornado.ioloop import IOLoop from traitlets import ( Any, Bool, CBytes, CUnicode, Dict, DottedObjectName, Instance, Integer, Set, TraitError, Unicode, observe, ) from traitlets.config.configurable import Configurable, LoggingConfigurable from traitlets.log import get_logger from traitlets.utils.importstring import import_item from zmq.eventloop.zmqstream import ZMQStream from ._version import protocol_version from .adapter import adapt from .jsonutil import extract_dates, json_clean, json_default, squash_dates PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL utc = timezone.utc # ----------------------------------------------------------------------------- # utility functions # ----------------------------------------------------------------------------- def squash_unicode(obj: t.Any) -> t.Any: """coerce unicode back to bytestrings.""" if isinstance(obj, dict): for key in list(obj.keys()): obj[key] = squash_unicode(obj[key]) if isinstance(key, str): obj[squash_unicode(key)] = obj.pop(key) elif isinstance(obj, list): for i, v in enumerate(obj): obj[i] = squash_unicode(v) elif isinstance(obj, str): obj = obj.encode("utf8") return obj # ----------------------------------------------------------------------------- # globals and defaults # ----------------------------------------------------------------------------- # default values for the thresholds: MAX_ITEMS = 64 MAX_BYTES = 1024 # ISO8601-ify datetime objects # allow unicode # disallow nan, because it's not actually valid JSON def json_packer(obj: t.Any) -> bytes: """Convert a json object to a bytes.""" try: return json.dumps( obj, default=json_default, ensure_ascii=False, allow_nan=False, ).encode("utf8", errors="surrogateescape") except (TypeError, ValueError) as e: # Fallback to trying to clean the json before serializing packed = json.dumps( json_clean(obj), default=json_default, ensure_ascii=False, allow_nan=False, ).encode("utf8", errors="surrogateescape") warnings.warn( f"Message serialization failed with:\n{e}\n" "Supporting this message is deprecated in jupyter-client 7, please make " "sure your message is JSON-compliant", stacklevel=2, ) return packed def json_unpacker(s: str | bytes) -> t.Any: """Convert a json bytes or string to an object.""" if isinstance(s, bytes): s = s.decode("utf8", "replace") return json.loads(s) def pickle_packer(o: t.Any) -> bytes: """Pack an object using the pickle module.""" return pickle.dumps(squash_dates(o), PICKLE_PROTOCOL) pickle_unpacker = pickle.loads default_packer = json_packer default_unpacker = json_unpacker DELIM = b"" # singleton dummy tracker, which will always report as done DONE = zmq.MessageTracker() # ----------------------------------------------------------------------------- # Mixin tools for apps that use Sessions # ----------------------------------------------------------------------------- def new_id() -> str: """Generate a new random id. Avoids problematic runtime import in stdlib uuid on Python 2. Returns ------- id string (16 random bytes as hex-encoded text, chunks separated by '-') """ buf = os.urandom(16) return "-".join(b2a_hex(x).decode("ascii") for x in (buf[:4], buf[4:])) def new_id_bytes() -> bytes: """Return new_id as ascii bytes""" return new_id().encode("ascii") session_aliases = { "ident": "Session.session", "user": "Session.username", "keyfile": "Session.keyfile", } session_flags = { "secure": ( {"Session": {"key": new_id_bytes(), "keyfile": ""}}, """Use HMAC digests for authentication of messages. Setting this flag will generate a new UUID to use as the HMAC key. """, ), "no-secure": ( {"Session": {"key": b"", "keyfile": ""}}, """Don't authenticate messages.""", ), } def default_secure(cfg: t.Any) -> None: # pragma: no cover """Set the default behavior for a config environment to be secure. If Session.key/keyfile have not been set, set Session.key to a new random UUID. """ warnings.warn("default_secure is deprecated", DeprecationWarning, stacklevel=2) if "Session" in cfg and ("key" in cfg.Session or "keyfile" in cfg.Session): return # key/keyfile not specified, generate new UUID: cfg.Session.key = new_id_bytes() def utcnow() -> datetime: """Return timezone-aware UTC timestamp""" return datetime.now(utc) # ----------------------------------------------------------------------------- # Classes # ----------------------------------------------------------------------------- class SessionFactory(LoggingConfigurable): """The Base class for configurables that have a Session, Context, logger, and IOLoop. """ logname = Unicode("") @observe("logname") def _logname_changed(self, change: t.Any) -> None: self.log = logging.getLogger(change["new"]) # not configurable: context = Instance("zmq.Context") def _context_default(self) -> zmq.Context: return zmq.Context() session = Instance("jupyter_client.session.Session", allow_none=True) loop = Instance("tornado.ioloop.IOLoop") def _loop_default(self) -> IOLoop: return IOLoop.current() def __init__(self, **kwargs: t.Any) -> None: """Initialize a session factory.""" super().__init__(**kwargs) if self.session is None: # construct the session self.session = Session(**kwargs) class Message: """A simple message object that maps dict keys to attributes. A Message can be created from a dict and a dict from a Message instance simply by calling dict(msg_obj).""" def __init__(self, msg_dict: dict[str, t.Any]) -> None: """Initialize a message.""" dct = self.__dict__ for k, v in dict(msg_dict).items(): if isinstance(v, dict): v = Message(v) # noqa dct[k] = v # Having this iterator lets dict(msg_obj) work out of the box. def __iter__(self) -> t.ItemsView[str, t.Any]: return iter(self.__dict__.items()) # type:ignore[return-value] def __repr__(self) -> str: return repr(self.__dict__) def __str__(self) -> str: return pprint.pformat(self.__dict__) def __contains__(self, k: object) -> bool: return k in self.__dict__ def __getitem__(self, k: str) -> t.Any: return self.__dict__[k] def msg_header( msg_id: str, msg_type: str, username: str, session: Session | str ) -> dict[str, t.Any]: """Create a new message header""" date = utcnow() version = protocol_version return locals() def extract_header(msg_or_header: dict[str, t.Any]) -> dict[str, t.Any]: """Given a message or header, return the header.""" if not msg_or_header: return {} try: # See if msg_or_header is the entire message. h = msg_or_header["header"] except KeyError: try: # See if msg_or_header is just the header h = msg_or_header["msg_id"] except KeyError: raise else: h = msg_or_header if not isinstance(h, dict): h = dict(h) return h class Session(Configurable): """Object for handling serialization and sending of messages. The Session object handles building messages and sending them with ZMQ sockets or ZMQStream objects. Objects can communicate with each other over the network via Session objects, and only need to work with the dict-based IPython message spec. The Session will handle serialization/deserialization, security, and metadata. Sessions support configurable serialization via packer/unpacker traits, and signing with HMAC digests via the key/keyfile traits. Parameters ---------- debug : bool whether to trigger extra debugging statements packer/unpacker : str : 'json', 'pickle' or import_string importstrings for methods to serialize message parts. If just 'json' or 'pickle', predefined JSON and pickle packers will be used. Otherwise, the entire importstring must be used. The functions must accept at least valid JSON input, and output *bytes*. For example, to use msgpack: packer = 'msgpack.packb', unpacker='msgpack.unpackb' pack/unpack : callables You can also set the pack/unpack callables for serialization directly. session : bytes the ID of this Session object. The default is to generate a new UUID. username : unicode username added to message headers. The default is to ask the OS. key : bytes The key used to initialize an HMAC signature. If unset, messages will not be signed or checked. keyfile : filepath The file containing a key. If this is set, `key` will be initialized to the contents of the file. """ debug = Bool(False, config=True, help="""Debug output in the Session""") check_pid = Bool( True, config=True, help="""Whether to check PID to protect against calls after fork. This check can be disabled if fork-safety is handled elsewhere. """, ) packer = DottedObjectName( "json", config=True, help="""The name of the packer for serializing messages. Should be one of 'json', 'pickle', or an import name for a custom callable serializer.""", ) @observe("packer") def _packer_changed(self, change: t.Any) -> None: new = change["new"] if new.lower() == "json": self.pack = json_packer self.unpack = json_unpacker self.unpacker = new elif new.lower() == "pickle": self.pack = pickle_packer self.unpack = pickle_unpacker self.unpacker = new else: self.pack = import_item(str(new)) unpacker = DottedObjectName( "json", config=True, help="""The name of the unpacker for unserializing messages. Only used with custom functions for `packer`.""", ) @observe("unpacker") def _unpacker_changed(self, change: t.Any) -> None: new = change["new"] if new.lower() == "json": self.pack = json_packer self.unpack = json_unpacker self.packer = new elif new.lower() == "pickle": self.pack = pickle_packer self.unpack = pickle_unpacker self.packer = new else: self.unpack = import_item(str(new)) session = CUnicode("", config=True, help="""The UUID identifying this session.""") def _session_default(self) -> str: u = new_id() self.bsession = u.encode("ascii") return u @observe("session") def _session_changed(self, change: t.Any) -> None: self.bsession = self.session.encode("ascii") # bsession is the session as bytes bsession = CBytes(b"") username = Unicode( os.environ.get("USER", "username"), help="""Username for the Session. Default is your system username.""", config=True, ) metadata = Dict( {}, config=True, help="Metadata dictionary, which serves as the default top-level metadata dict for each " "message.", ) # if 0, no adapting to do. adapt_version = Integer(0) # message signature related traits: key = CBytes(config=True, help="""execution key, for signing messages.""") def _key_default(self) -> bytes: return new_id_bytes() @observe("key") def _key_changed(self, change: t.Any) -> None: self._new_auth() signature_scheme = Unicode( "hmac-sha256", config=True, help="""The digest scheme used to construct the message signatures. Must have the form 'hmac-HASH'.""", ) @observe("signature_scheme") def _signature_scheme_changed(self, change: t.Any) -> None: new = change["new"] if not new.startswith("hmac-"): raise TraitError("signature_scheme must start with 'hmac-', got %r" % new) hash_name = new.split("-", 1)[1] try: self.digest_mod = getattr(hashlib, hash_name) except AttributeError as e: raise TraitError("hashlib has no such attribute: %s" % hash_name) from e self._new_auth() digest_mod = Any() def _digest_mod_default(self) -> t.Callable: return hashlib.sha256 auth = Instance(hmac.HMAC, allow_none=True) def _new_auth(self) -> None: if self.key: self.auth = hmac.HMAC(self.key, digestmod=self.digest_mod) else: self.auth = None digest_history = Set() digest_history_size = Integer( 2**16, config=True, help="""The maximum number of digests to remember. The digest history will be culled when it exceeds this value. """, ) keyfile = Unicode("", config=True, help="""path to file containing execution key.""") @observe("keyfile") def _keyfile_changed(self, change: t.Any) -> None: with open(change["new"], "rb") as f: self.key = f.read().strip() # for protecting against sends from forks pid = Integer() # serialization traits: pack = Any(default_packer) # the actual packer function @observe("pack") def _pack_changed(self, change: t.Any) -> None: new = change["new"] if not callable(new): raise TypeError("packer must be callable, not %s" % type(new)) unpack = Any(default_unpacker) # the actual packer function @observe("unpack") def _unpack_changed(self, change: t.Any) -> None: # unpacker is not checked - it is assumed to be new = change["new"] if not callable(new): raise TypeError("unpacker must be callable, not %s" % type(new)) # thresholds: copy_threshold = Integer( 2**16, config=True, help="Threshold (in bytes) beyond which a buffer should be sent without copying.", ) buffer_threshold = Integer( MAX_BYTES, config=True, help="Threshold (in bytes) beyond which an object's buffer should be extracted to avoid " "pickling.", ) item_threshold = Integer( MAX_ITEMS, config=True, help="""The maximum number of items for a container to be introspected for custom serialization. Containers larger than this are pickled outright. """, ) def __init__(self, **kwargs: t.Any) -> None: """create a Session object Parameters ---------- debug : bool whether to trigger extra debugging statements packer/unpacker : str : 'json', 'pickle' or import_string importstrings for methods to serialize message parts. If just 'json' or 'pickle', predefined JSON and pickle packers will be used. Otherwise, the entire importstring must be used. The functions must accept at least valid JSON input, and output *bytes*. For example, to use msgpack: packer = 'msgpack.packb', unpacker='msgpack.unpackb' pack/unpack : callables You can also set the pack/unpack callables for serialization directly. session : unicode (must be ascii) the ID of this Session object. The default is to generate a new UUID. bsession : bytes The session as bytes username : unicode username added to message headers. The default is to ask the OS. key : bytes The key used to initialize an HMAC signature. If unset, messages will not be signed or checked. signature_scheme : str The message digest scheme. Currently must be of the form 'hmac-HASH', where 'HASH' is a hashing function available in Python's hashlib. The default is 'hmac-sha256'. This is ignored if 'key' is empty. keyfile : filepath The file containing a key. If this is set, `key` will be initialized to the contents of the file. """ super().__init__(**kwargs) self._check_packers() self.none = self.pack({}) # ensure self._session_default() if necessary, so bsession is defined: self.session # noqa self.pid = os.getpid() self._new_auth() if not self.key: get_logger().warning( "Message signing is disabled. This is insecure and not recommended!" ) def clone(self) -> Session: """Create a copy of this Session Useful when connecting multiple times to a given kernel. This prevents a shared digest_history warning about duplicate digests due to multiple connections to IOPub in the same process. .. versionadded:: 5.1 """ # make a copy new_session = type(self)() for name in self.traits(): setattr(new_session, name, getattr(self, name)) # fork digest_history new_session.digest_history = set() new_session.digest_history.update(self.digest_history) return new_session message_count = 0 @property def msg_id(self) -> str: message_number = self.message_count self.message_count += 1 return f"{self.session}_{os.getpid()}_{message_number}" def _check_packers(self) -> None: """check packers for datetime support.""" pack = self.pack unpack = self.unpack # check simple serialization msg_list = {"a": [1, "hi"]} try: packed = pack(msg_list) except Exception as e: msg = f"packer '{self.packer}' could not serialize a simple message: {e}" raise ValueError(msg) from e # ensure packed message is bytes if not isinstance(packed, bytes): raise ValueError("message packed to %r, but bytes are required" % type(packed)) # check that unpack is pack's inverse try: unpacked = unpack(packed) assert unpacked == msg_list except Exception as e: msg = ( f"unpacker '{self.unpacker}' could not handle output from packer" f" '{self.packer}': {e}" ) raise ValueError(msg) from e # check datetime support msg_datetime = {"t": utcnow()} try: unpacked = unpack(pack(msg_datetime)) if isinstance(unpacked["t"], datetime): msg = "Shouldn't deserialize to datetime" raise ValueError(msg) except Exception: self.pack = lambda o: pack(squash_dates(o)) self.unpack = lambda s: unpack(s) def msg_header(self, msg_type: str) -> dict[str, t.Any]: """Create a header for a message type.""" return msg_header(self.msg_id, msg_type, self.username, self.session) def msg( self, msg_type: str, content: dict | None = None, parent: dict[str, t.Any] | None = None, header: dict[str, t.Any] | None = None, metadata: dict[str, t.Any] | None = None, ) -> dict[str, t.Any]: """Return the nested message dict. This format is different from what is sent over the wire. The serialize/deserialize methods converts this nested message dict to the wire format, which is a list of message parts. """ msg = {} header = self.msg_header(msg_type) if header is None else header msg["header"] = header msg["msg_id"] = header["msg_id"] msg["msg_type"] = header["msg_type"] msg["parent_header"] = {} if parent is None else extract_header(parent) msg["content"] = {} if content is None else content msg["metadata"] = self.metadata.copy() if metadata is not None: msg["metadata"].update(metadata) return msg def sign(self, msg_list: list) -> bytes: """Sign a message with HMAC digest. If no auth, return b''. Parameters ---------- msg_list : list The [p_header,p_parent,p_content] part of the message list. """ if self.auth is None: return b"" h = self.auth.copy() for m in msg_list: h.update(m) return h.hexdigest().encode() def serialize( self, msg: dict[str, t.Any], ident: list[bytes] | bytes | None = None, ) -> list[bytes]: """Serialize the message components to bytes. This is roughly the inverse of deserialize. The serialize/deserialize methods work with full message lists, whereas pack/unpack work with the individual message parts in the message list. Parameters ---------- msg : dict or Message The next message dict as returned by the self.msg method. Returns ------- msg_list : list The list of bytes objects to be sent with the format:: [ident1, ident2, ..., DELIM, HMAC, p_header, p_parent, p_metadata, p_content, buffer1, buffer2, ...] In this list, the ``p_*`` entities are the packed or serialized versions, so if JSON is used, these are utf8 encoded JSON strings. """ content = msg.get("content", {}) if content is None: content = self.none elif isinstance(content, dict): content = self.pack(content) elif isinstance(content, bytes): # content is already packed, as in a relayed message pass elif isinstance(content, str): # should be bytes, but JSON often spits out unicode content = content.encode("utf8") else: raise TypeError("Content incorrect type: %s" % type(content)) real_message = [ self.pack(msg["header"]), self.pack(msg["parent_header"]), self.pack(msg["metadata"]), content, ] to_send = [] if isinstance(ident, list): # accept list of idents to_send.extend(ident) elif ident is not None: to_send.append(ident) to_send.append(DELIM) signature = self.sign(real_message) to_send.append(signature) to_send.extend(real_message) return to_send def send( self, stream: zmq.sugar.socket.Socket | ZMQStream | None, msg_or_type: dict[str, t.Any] | str, content: dict[str, t.Any] | None = None, parent: dict[str, t.Any] | None = None, ident: bytes | list[bytes] | None = None, buffers: list[bytes] | None = None, track: bool = False, header: dict[str, t.Any] | None = None, metadata: dict[str, t.Any] | None = None, ) -> dict[str, t.Any] | None: """Build and send a message via stream or socket. The message format used by this function internally is as follows: [ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content, buffer1,buffer2,...] The serialize/deserialize methods convert the nested message dict into this format. Parameters ---------- stream : zmq.Socket or ZMQStream The socket-like object used to send the data. msg_or_type : str or Message/dict Normally, msg_or_type will be a msg_type unless a message is being sent more than once. If a header is supplied, this can be set to None and the msg_type will be pulled from the header. content : dict or None The content of the message (ignored if msg_or_type is a message). header : dict or None The header dict for the message (ignored if msg_to_type is a message). parent : Message or dict or None The parent or parent header describing the parent of this message (ignored if msg_or_type is a message). ident : bytes or list of bytes The zmq.IDENTITY routing path. metadata : dict or None The metadata describing the message buffers : list or None The already-serialized buffers to be appended to the message. track : bool Whether to track. Only for use with Sockets, because ZMQStream objects cannot track messages. Returns ------- msg : dict The constructed message. """ if not isinstance(stream, zmq.Socket): # ZMQStreams and dummy sockets do not support tracking. track = False if isinstance(stream, zmq.asyncio.Socket): assert stream is not None # type:ignore[unreachable] stream = zmq.Socket.shadow(stream.underlying) if isinstance(msg_or_type, (Message, dict)): # We got a Message or message dict, not a msg_type so don't # build a new Message. msg = msg_or_type buffers = buffers or msg.get("buffers", []) else: msg = self.msg( msg_or_type, content=content, parent=parent, header=header, metadata=metadata, ) if self.check_pid and os.getpid() != self.pid: get_logger().warning("WARNING: attempted to send message from fork\n%s", msg) return None buffers = [] if buffers is None else buffers for idx, buf in enumerate(buffers): if isinstance(buf, memoryview): view = buf else: try: # check to see if buf supports the buffer protocol. view = memoryview(buf) except TypeError as e: emsg = "Buffer objects must support the buffer protocol." raise TypeError(emsg) from e # memoryview.contiguous is new in 3.3, # just skip the check on Python 2 if hasattr(view, "contiguous") and not view.contiguous: # zmq requires memoryviews to be contiguous raise ValueError("Buffer %i (%r) is not contiguous" % (idx, buf)) if self.adapt_version: msg = adapt(msg, self.adapt_version) to_send = self.serialize(msg, ident) to_send.extend(buffers) longest = max([len(s) for s in to_send]) copy = longest < self.copy_threshold if stream and buffers and track and not copy: # only really track when we are doing zero-copy buffers tracker = stream.send_multipart(to_send, copy=False, track=True) elif stream: # use dummy tracker, which will be done immediately tracker = DONE stream.send_multipart(to_send, copy=copy) else: tracker = DONE if self.debug: pprint.pprint(msg) # noqa pprint.pprint(to_send) # noqa pprint.pprint(buffers) # noqa msg["tracker"] = tracker return msg def send_raw( self, stream: zmq.sugar.socket.Socket, msg_list: list, flags: int = 0, copy: bool = True, ident: bytes | list[bytes] | None = None, ) -> None: """Send a raw message via ident path. This method is used to send a already serialized message. Parameters ---------- stream : ZMQStream or Socket The ZMQ stream or socket to use for sending the message. msg_list : list The serialized list of messages to send. This only includes the [p_header,p_parent,p_metadata,p_content,buffer1,buffer2,...] portion of the message. ident : ident or list A single ident or a list of idents to use in sending. """ to_send = [] if isinstance(ident, bytes): ident = [ident] if ident is not None: to_send.extend(ident) to_send.append(DELIM) # Don't include buffers in signature (per spec). to_send.append(self.sign(msg_list[0:4])) to_send.extend(msg_list) if isinstance(stream, zmq.asyncio.Socket): stream = zmq.Socket.shadow(stream.underlying) stream.send_multipart(to_send, flags, copy=copy) def recv( self, socket: zmq.sugar.socket.Socket, mode: int = zmq.NOBLOCK, content: bool = True, copy: bool = True, ) -> tuple[list[bytes] | None, dict[str, t.Any] | None]: """Receive and unpack a message. Parameters ---------- socket : ZMQStream or Socket The socket or stream to use in receiving. Returns ------- [idents], msg [idents] is a list of idents and msg is a nested message dict of same format as self.msg returns. """ if isinstance(socket, ZMQStream): # type:ignore[unreachable] socket = socket.socket # type:ignore[unreachable] if isinstance(socket, zmq.asyncio.Socket): socket = zmq.Socket.shadow(socket.underlying) try: msg_list = socket.recv_multipart(mode, copy=copy) except zmq.ZMQError as e: if e.errno == zmq.EAGAIN: # We can convert EAGAIN to None as we know in this case # recv_multipart won't return None. return None, None else: raise # split multipart message into identity list and message dict # invalid large messages can cause very expensive string comparisons idents, msg_list = self.feed_identities(msg_list, copy) try: return idents, self.deserialize(msg_list, content=content, copy=copy) except Exception as e: # TODO: handle it raise e def feed_identities( self, msg_list: list[bytes] | list[zmq.Message], copy: bool = True ) -> tuple[list[bytes], list[bytes] | list[zmq.Message]]: """Split the identities from the rest of the message. Feed until DELIM is reached, then return the prefix as idents and remainder as msg_list. This is easily broken by setting an IDENT to DELIM, but that would be silly. Parameters ---------- msg_list : a list of Message or bytes objects The message to be split. copy : bool flag determining whether the arguments are bytes or Messages Returns ------- (idents, msg_list) : two lists idents will always be a list of bytes, each of which is a ZMQ identity. msg_list will be a list of bytes or zmq.Messages of the form [HMAC,p_header,p_parent,p_content,buffer1,buffer2,...] and should be unpackable/unserializable via self.deserialize at this point. """ if copy: msg_list = t.cast(t.List[bytes], msg_list) idx = msg_list.index(DELIM) return msg_list[:idx], msg_list[idx + 1 :] else: msg_list = t.cast(t.List[zmq.Message], msg_list) failed = True for idx, m in enumerate(msg_list): # noqa if m.bytes == DELIM: failed = False break if failed: msg = "DELIM not in msg_list" raise ValueError(msg) idents, msg_list = msg_list[:idx], msg_list[idx + 1 :] return [bytes(m.bytes) for m in idents], msg_list def _add_digest(self, signature: bytes) -> None: """add a digest to history to protect against replay attacks""" if self.digest_history_size == 0: # no history, never add digests return self.digest_history.add(signature) if len(self.digest_history) > self.digest_history_size: # threshold reached, cull 10% self._cull_digest_history() def _cull_digest_history(self) -> None: """cull the digest history Removes a randomly selected 10% of the digest history """ current = len(self.digest_history) n_to_cull = max(int(current // 10), current - self.digest_history_size) if n_to_cull >= current: self.digest_history = set() return to_cull = random.sample(tuple(sorted(self.digest_history)), n_to_cull) self.digest_history.difference_update(to_cull) def deserialize( self, msg_list: list[bytes] | list[zmq.Message], content: bool = True, copy: bool = True, ) -> dict[str, t.Any]: """Unserialize a msg_list to a nested message dict. This is roughly the inverse of serialize. The serialize/deserialize methods work with full message lists, whereas pack/unpack work with the individual message parts in the message list. Parameters ---------- msg_list : list of bytes or Message objects The list of message parts of the form [HMAC,p_header,p_parent, p_metadata,p_content,buffer1,buffer2,...]. content : bool (True) Whether to unpack the content dict (True), or leave it packed (False). copy : bool (True) Whether msg_list contains bytes (True) or the non-copying Message objects in each place (False). Returns ------- msg : dict The nested message dict with top-level keys [header, parent_header, content, buffers]. The buffers are returned as memoryviews. """ minlen = 5 message = {} if not copy: # pyzmq didn't copy the first parts of the message, so we'll do it msg_list = t.cast(t.List[zmq.Message], msg_list) msg_list_beginning = [bytes(msg.bytes) for msg in msg_list[:minlen]] msg_list = t.cast(t.List[bytes], msg_list) msg_list = msg_list_beginning + msg_list[minlen:] msg_list = t.cast(t.List[bytes], msg_list) if self.auth is not None: signature = msg_list[0] if not signature: msg = "Unsigned Message" raise ValueError(msg) if signature in self.digest_history: raise ValueError("Duplicate Signature: %r" % signature) if content: # Only store signature if we are unpacking content, don't store if just peeking. self._add_digest(signature) check = self.sign(msg_list[1:5]) if not compare_digest(signature, check): msg = "Invalid Signature: %r" % signature raise ValueError(msg) if not len(msg_list) >= minlen: msg = "malformed message, must have at least %i elements" % minlen raise TypeError(msg) header = self.unpack(msg_list[1]) message["header"] = extract_dates(header) message["msg_id"] = header["msg_id"] message["msg_type"] = header["msg_type"] message["parent_header"] = extract_dates(self.unpack(msg_list[2])) message["metadata"] = self.unpack(msg_list[3]) if content: message["content"] = self.unpack(msg_list[4]) else: message["content"] = msg_list[4] buffers = [memoryview(b) for b in msg_list[5:]] if buffers and buffers[0].shape is None: # force copy to workaround pyzmq #646 msg_list = t.cast(t.List[zmq.Message], msg_list) buffers = [memoryview(bytes(b.bytes)) for b in msg_list[5:]] message["buffers"] = buffers if self.debug: pprint.pprint(message) # noqa # adapt to the current version return adapt(message) def unserialize(self, *args: t.Any, **kwargs: t.Any) -> dict[str, t.Any]: """**DEPRECATED** Use deserialize instead.""" # pragma: no cover warnings.warn( "Session.unserialize is deprecated. Use Session.deserialize.", DeprecationWarning, stacklevel=2, ) return self.deserialize(*args, **kwargs) jupyter_client-8.6.2/jupyter_client/ssh/000077500000000000000000000000001462351563100204325ustar00rootroot00000000000000jupyter_client-8.6.2/jupyter_client/ssh/__init__.py000066400000000000000000000000361462351563100225420ustar00rootroot00000000000000from .tunnel import * # noqa jupyter_client-8.6.2/jupyter_client/ssh/forward.py000066400000000000000000000067501462351563100224600ustar00rootroot00000000000000"""Sample script showing how to do local port forwarding over paramiko. This script connects to the requested SSH server and sets up local port forwarding (the openssh -L option) from a local port through a tunneled connection to a destination reachable from the SSH server machine. """ # # This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1. # Original Copyright (C) 2003-2007 Robey Pointer # Edits Copyright (C) 2010 The IPython Team # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA. import logging import select import socketserver import typing as t logger = logging.getLogger("ssh") class ForwardServer(socketserver.ThreadingTCPServer): """A server to use for ssh forwarding.""" daemon_threads = True allow_reuse_address = True class Handler(socketserver.BaseRequestHandler): """A handle for server requests.""" @t.no_type_check def handle(self): """Handle a request.""" try: chan = self.ssh_transport.open_channel( "direct-tcpip", (self.chain_host, self.chain_port), self.request.getpeername(), ) except Exception as e: logger.debug( "Incoming request to %s:%d failed: %s" % (self.chain_host, self.chain_port, repr(e)) ) return if chan is None: logger.debug( "Incoming request to %s:%d was rejected by the SSH server." % (self.chain_host, self.chain_port) ) return logger.debug( "Connected! Tunnel open {!r} -> {!r} -> {!r}".format( self.request.getpeername(), chan.getpeername(), (self.chain_host, self.chain_port), ) ) while True: r, w, x = select.select([self.request, chan], [], []) if self.request in r: data = self.request.recv(1024) if len(data) == 0: break chan.send(data) if chan in r: data = chan.recv(1024) if len(data) == 0: break self.request.send(data) chan.close() self.request.close() logger.debug("Tunnel closed ") def forward_tunnel(local_port: int, remote_host: str, remote_port: int, transport: t.Any) -> None: """Forward an ssh tunnel.""" # this is a little convoluted, but lets me configure things for the Handler # object. (SocketServer doesn't give Handlers any way to access the outer # server normally.) class SubHander(Handler): chain_host = remote_host chain_port = remote_port ssh_transport = transport ForwardServer(("127.0.0.1", local_port), SubHander).serve_forever() __all__ = ["forward_tunnel"] jupyter_client-8.6.2/jupyter_client/ssh/tunnel.py000066400000000000000000000327431462351563100223220ustar00rootroot00000000000000"""Basic ssh tunnel utilities, and convenience functions for tunneling zeromq connections. """ # Copyright (C) 2010-2011 IPython Development Team # Copyright (C) 2011- PyZMQ Developers # # Redistributed from IPython under the terms of the BSD License. from __future__ import annotations import atexit import os import re import signal import socket import sys import warnings from getpass import getpass, getuser from multiprocessing import Process from typing import Any, cast try: with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) import paramiko SSHException = paramiko.ssh_exception.SSHException except ImportError: paramiko = None # type:ignore[assignment] class SSHException(Exception): # type:ignore[no-redef] # noqa pass else: from .forward import forward_tunnel try: import pexpect # type: ignore[import-untyped] except ImportError: pexpect = None def select_random_ports(n: int) -> list[int]: """Select and return n random ports that are available.""" ports = [] sockets = [] for _ in range(n): sock = socket.socket() sock.bind(("", 0)) ports.append(sock.getsockname()[1]) sockets.append(sock) for sock in sockets: sock.close() return ports # ----------------------------------------------------------------------------- # Check for passwordless login # ----------------------------------------------------------------------------- _password_pat = re.compile((rb"pass(word|phrase):"), re.IGNORECASE) def try_passwordless_ssh(server: str, keyfile: str | None, paramiko: Any = None) -> Any: """Attempt to make an ssh connection without a password. This is mainly used for requiring password input only once when many tunnels may be connected to the same server. If paramiko is None, the default for the platform is chosen. """ if paramiko is None: paramiko = sys.platform == "win32" f = _try_passwordless_paramiko if paramiko else _try_passwordless_openssh return f(server, keyfile) def _try_passwordless_openssh(server: str, keyfile: str | None) -> bool: """Try passwordless login with shell ssh command.""" if pexpect is None: msg = "pexpect unavailable, use paramiko" raise ImportError(msg) cmd = "ssh -f " + server if keyfile: cmd += " -i " + keyfile cmd += " exit" # pop SSH_ASKPASS from env env = os.environ.copy() env.pop("SSH_ASKPASS", None) ssh_newkey = "Are you sure you want to continue connecting" p = pexpect.spawn(cmd, env=env) while True: try: i = p.expect([ssh_newkey, _password_pat], timeout=0.1) if i == 0: msg = "The authenticity of the host can't be established." raise SSHException(msg) except pexpect.TIMEOUT: continue except pexpect.EOF: return True else: return False def _try_passwordless_paramiko(server: str, keyfile: str | None) -> bool: """Try passwordless login with paramiko.""" if paramiko is None: msg = "Paramiko unavailable, " # type:ignore[unreachable] if sys.platform == "win32": msg += "Paramiko is required for ssh tunneled connections on Windows." else: msg += "use OpenSSH." raise ImportError(msg) username, server, port = _split_server(server) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy()) try: client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True) except paramiko.AuthenticationException: return False else: client.close() return True def tunnel_connection( socket: socket.socket, addr: str, server: str, keyfile: str | None = None, password: str | None = None, paramiko: Any = None, timeout: int = 60, ) -> int: """Connect a socket to an address via an ssh tunnel. This is a wrapper for socket.connect(addr), when addr is not accessible from the local machine. It simply creates an ssh tunnel using the remaining args, and calls socket.connect('tcp://localhost:lport') where lport is the randomly selected local port of the tunnel. """ new_url, tunnel = open_tunnel( addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout, ) socket.connect(new_url) return tunnel def open_tunnel( addr: str, server: str, keyfile: str | None = None, password: str | None = None, paramiko: Any = None, timeout: int = 60, ) -> tuple[str, int]: """Open a tunneled connection from a 0MQ url. For use inside tunnel_connection. Returns ------- (url, tunnel) : (str, object) The 0MQ url that has been forwarded, and the tunnel object """ lport = select_random_ports(1)[0] _, addr = addr.split("://") ip, rport = addr.split(":") rport_int = int(rport) paramiko = sys.platform == "win32" if paramiko is None else paramiko_tunnel tunnelf = paramiko_tunnel if paramiko else openssh_tunnel tunnel = tunnelf( lport, rport_int, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout, ) return "tcp://127.0.0.1:%i" % lport, cast(int, tunnel) def openssh_tunnel( lport: int, rport: int, server: str, remoteip: str = "127.0.0.1", keyfile: str | None = None, password: str | None | bool = None, timeout: int = 60, ) -> int: """Create an ssh tunnel using command-line ssh that connects port lport on this machine to localhost:rport on server. The tunnel will automatically close when not in use, remaining open for a minimum of timeout seconds for an initial connection. This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, as seen from `server`. keyfile and password may be specified, but ssh config is checked for defaults. Parameters ---------- lport : int local port for connecting to the tunnel from this machine. rport : int port on the remote machine to connect to. server : str The ssh server to connect to. The full ssh server string will be parsed. user@server:port remoteip : str [Default: 127.0.0.1] The remote ip, specifying the destination of the tunnel. Default is localhost, which means that the tunnel would redirect localhost:lport on this machine to localhost:rport on the *server*. keyfile : str; path to public key file This specifies a key to be used in ssh login, default None. Regular default ssh keys will be used without specifying this argument. password : str; Your ssh password to the ssh server. Note that if this is left None, you will be prompted for it if passwordless key based login is unavailable. timeout : int [default: 60] The time (in seconds) after which no activity will result in the tunnel closing. This prevents orphaned tunnels from running forever. """ if pexpect is None: msg = "pexpect unavailable, use paramiko_tunnel" raise ImportError(msg) ssh = "ssh " if keyfile: ssh += "-i " + keyfile if ":" in server: server, port = server.split(":") ssh += " -p %s" % port cmd = f"{ssh} -O check {server}" (output, exitstatus) = pexpect.run(cmd, withexitstatus=True) if not exitstatus: pid = int(output[output.find(b"(pid=") + 5 : output.find(b")")]) cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % ( ssh, lport, remoteip, rport, server, ) (output, exitstatus) = pexpect.run(cmd, withexitstatus=True) if not exitstatus: atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1)) return pid cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % ( ssh, lport, remoteip, rport, server, timeout, ) # pop SSH_ASKPASS from env env = os.environ.copy() env.pop("SSH_ASKPASS", None) ssh_newkey = "Are you sure you want to continue connecting" tunnel = pexpect.spawn(cmd, env=env) failed = False while True: try: i = tunnel.expect([ssh_newkey, _password_pat], timeout=0.1) if i == 0: msg = "The authenticity of the host can't be established." raise SSHException(msg) except pexpect.TIMEOUT: continue except pexpect.EOF as e: tunnel.wait() if tunnel.exitstatus: raise RuntimeError("tunnel '%s' failed to start" % (cmd)) from e else: return tunnel.pid else: if failed: warnings.warn("Password rejected, try again", stacklevel=2) password = None if password is None: password = getpass("%s's password: " % (server)) tunnel.sendline(password) failed = True def _stop_tunnel(cmd: Any) -> None: pexpect.run(cmd) def _split_server(server: str) -> tuple[str, str, int]: if "@" in server: username, server = server.split("@", 1) else: username = getuser() if ":" in server: server, port_str = server.split(":") port = int(port_str) else: port = 22 return username, server, port def paramiko_tunnel( lport: int, rport: int, server: str, remoteip: str = "127.0.0.1", keyfile: str | None = None, password: str | None = None, timeout: float = 60, ) -> Process: """launch a tunner with paramiko in a subprocess. This should only be used when shell ssh is unavailable (e.g. Windows). This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, as seen from `server`. If you are familiar with ssh tunnels, this creates the tunnel: ssh server -L localhost:lport:remoteip:rport keyfile and password may be specified, but ssh config is checked for defaults. Parameters ---------- lport : int local port for connecting to the tunnel from this machine. rport : int port on the remote machine to connect to. server : str The ssh server to connect to. The full ssh server string will be parsed. user@server:port remoteip : str [Default: 127.0.0.1] The remote ip, specifying the destination of the tunnel. Default is localhost, which means that the tunnel would redirect localhost:lport on this machine to localhost:rport on the *server*. keyfile : str; path to public key file This specifies a key to be used in ssh login, default None. Regular default ssh keys will be used without specifying this argument. password : str; Your ssh password to the ssh server. Note that if this is left None, you will be prompted for it if passwordless key based login is unavailable. timeout : int [default: 60] The time (in seconds) after which no activity will result in the tunnel closing. This prevents orphaned tunnels from running forever. """ if paramiko is None: msg = "Paramiko not available" # type:ignore[unreachable] raise ImportError(msg) if password is None and not _try_passwordless_paramiko(server, keyfile): password = getpass("%s's password: " % (server)) p = Process( target=_paramiko_tunnel, args=(lport, rport, server, remoteip), kwargs={"keyfile": keyfile, "password": password}, ) p.daemon = True p.start() return p def _paramiko_tunnel( lport: int, rport: int, server: str, remoteip: str, keyfile: str | None = None, password: str | None = None, ) -> None: """Function for actually starting a paramiko tunnel, to be passed to multiprocessing.Process(target=this), and not called directly. """ username, server, port = _split_server(server) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy()) try: client.connect( server, port, username=username, key_filename=keyfile, look_for_keys=True, password=password, ) # except paramiko.AuthenticationException: # if password is None: # password = getpass("%s@%s's password: "%(username, server)) # client.connect(server, port, username=username, password=password) # else: # raise except Exception as e: warnings.warn("*** Failed to connect to %s:%d: %r" % (server, port, e), stacklevel=2) sys.exit(1) # Don't let SIGINT kill the tunnel subprocess signal.signal(signal.SIGINT, signal.SIG_IGN) try: forward_tunnel(lport, remoteip, rport, client.get_transport()) except KeyboardInterrupt: warnings.warn("SIGINT: Port forwarding stopped cleanly", stacklevel=2) sys.exit(0) except Exception as e: warnings.warn("Port forwarding stopped uncleanly: %s" % e, stacklevel=2) sys.exit(255) if sys.platform == "win32": ssh_tunnel = paramiko_tunnel else: ssh_tunnel = openssh_tunnel __all__ = [ "tunnel_connection", "ssh_tunnel", "openssh_tunnel", "paramiko_tunnel", "try_passwordless_ssh", ] jupyter_client-8.6.2/jupyter_client/threaded.py000066400000000000000000000260231462351563100217720ustar00rootroot00000000000000""" Defines a KernelClient that provides thread-safe sockets with async callbacks on message replies. """ import asyncio import atexit import time from concurrent.futures import Future from functools import partial from threading import Thread from typing import Any, Dict, List, Optional import zmq from tornado.ioloop import IOLoop from traitlets import Instance, Type from traitlets.log import get_logger from zmq.eventloop import zmqstream from .channels import HBChannel from .client import KernelClient from .session import Session # Local imports # import ZMQError in top-level namespace, to avoid ugly attribute-error messages # during garbage collection of threads at exit class ThreadedZMQSocketChannel: """A ZMQ socket invoking a callback in the ioloop""" session = None socket = None ioloop = None stream = None _inspect = None def __init__( self, socket: Optional[zmq.Socket], session: Optional[Session], loop: Optional[IOLoop], ) -> None: """Create a channel. Parameters ---------- socket : :class:`zmq.Socket` The ZMQ socket to use. session : :class:`session.Session` The session to use. loop A tornado ioloop to connect the socket to using a ZMQStream """ super().__init__() self.socket = socket self.session = session self.ioloop = loop f: Future = Future() def setup_stream() -> None: try: assert self.socket is not None self.stream = zmqstream.ZMQStream(self.socket, self.ioloop) self.stream.on_recv(self._handle_recv) except Exception as e: f.set_exception(e) else: f.set_result(None) assert self.ioloop is not None self.ioloop.add_callback(setup_stream) # don't wait forever, raise any errors f.result(timeout=10) _is_alive = False def is_alive(self) -> bool: """Whether the channel is alive.""" return self._is_alive def start(self) -> None: """Start the channel.""" self._is_alive = True def stop(self) -> None: """Stop the channel.""" self._is_alive = False def close(self) -> None: """Close the channel.""" if self.stream is not None and self.ioloop is not None: # c.f.Future for threadsafe results f: Future = Future() def close_stream() -> None: try: if self.stream is not None: self.stream.close(linger=0) self.stream = None except Exception as e: f.set_exception(e) else: f.set_result(None) self.ioloop.add_callback(close_stream) # wait for result try: f.result(timeout=5) except Exception as e: log = get_logger() msg = f"Error closing stream {self.stream}: {e}" log.warning(msg, RuntimeWarning, stacklevel=2) if self.socket is not None: try: self.socket.close(linger=0) except Exception: pass self.socket = None def send(self, msg: Dict[str, Any]) -> None: """Queue a message to be sent from the IOLoop's thread. Parameters ---------- msg : message to send This is threadsafe, as it uses IOLoop.add_callback to give the loop's thread control of the action. """ def thread_send() -> None: assert self.session is not None self.session.send(self.stream, msg) assert self.ioloop is not None self.ioloop.add_callback(thread_send) def _handle_recv(self, msg_list: List) -> None: """Callback for stream.on_recv. Unpacks message, and calls handlers with it. """ assert self.ioloop is not None assert self.session is not None ident, smsg = self.session.feed_identities(msg_list) msg = self.session.deserialize(smsg) # let client inspect messages if self._inspect: self._inspect(msg) # type:ignore[unreachable] self.call_handlers(msg) def call_handlers(self, msg: Dict[str, Any]) -> None: """This method is called in the ioloop thread when a message arrives. Subclasses should override this method to handle incoming messages. It is important to remember that this method is called in the thread so that some logic must be done to ensure that the application level handlers are called in the application thread. """ pass def process_events(self) -> None: """Subclasses should override this with a method processing any pending GUI events. """ pass def flush(self, timeout: float = 1.0) -> None: """Immediately processes all pending messages on this channel. This is only used for the IOPub channel. Callers should use this method to ensure that :meth:`call_handlers` has been called for all messages that have been received on the 0MQ SUB socket of this channel. This method is thread safe. Parameters ---------- timeout : float, optional The maximum amount of time to spend flushing, in seconds. The default is one second. """ # We do the IOLoop callback process twice to ensure that the IOLoop # gets to perform at least one full poll. stop_time = time.monotonic() + timeout assert self.ioloop is not None if self.stream is None or self.stream.closed(): # don't bother scheduling flush on a thread if we're closed _msg = "Attempt to flush closed stream" raise OSError(_msg) def flush(f: Any) -> None: try: self._flush() except Exception as e: f.set_exception(e) else: f.set_result(None) for _ in range(2): f: Future = Future() self.ioloop.add_callback(partial(flush, f)) # wait for async flush, re-raise any errors timeout = max(stop_time - time.monotonic(), 0) try: f.result(max(stop_time - time.monotonic(), 0)) except TimeoutError: # flush with a timeout means stop waiting, not raise return def _flush(self) -> None: """Callback for :method:`self.flush`.""" assert self.stream is not None self.stream.flush() self._flushed = True class IOLoopThread(Thread): """Run a pyzmq ioloop in a thread to send and receive messages""" _exiting = False ioloop = None def __init__(self) -> None: """Initialize an io loop thread.""" super().__init__() self.daemon = True @staticmethod @atexit.register def _notice_exit() -> None: # Class definitions can be torn down during interpreter shutdown. # We only need to set _exiting flag if this hasn't happened. if IOLoopThread is not None: IOLoopThread._exiting = True def start(self) -> None: """Start the IOLoop thread Don't return until self.ioloop is defined, which is created in the thread """ self._start_future: Future = Future() Thread.start(self) # wait for start, re-raise any errors self._start_future.result(timeout=10) def run(self) -> None: """Run my loop, ignoring EINTR events in the poller""" try: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) async def assign_ioloop() -> None: self.ioloop = IOLoop.current() loop.run_until_complete(assign_ioloop()) except Exception as e: self._start_future.set_exception(e) else: self._start_future.set_result(None) loop.run_until_complete(self._async_run()) async def _async_run(self) -> None: """Run forever (until self._exiting is set)""" while not self._exiting: await asyncio.sleep(1) def stop(self) -> None: """Stop the channel's event loop and join its thread. This calls :meth:`~threading.Thread.join` and returns when the thread terminates. :class:`RuntimeError` will be raised if :meth:`~threading.Thread.start` is called again. """ self._exiting = True self.join() self.close() self.ioloop = None def __del__(self) -> None: self.close() def close(self) -> None: """Close the io loop thread.""" if self.ioloop is not None: try: self.ioloop.close(all_fds=True) except Exception: pass class ThreadedKernelClient(KernelClient): """A KernelClient that provides thread-safe sockets with async callbacks on message replies.""" @property def ioloop(self) -> Optional[IOLoop]: # type:ignore[override] if self.ioloop_thread: return self.ioloop_thread.ioloop return None ioloop_thread = Instance(IOLoopThread, allow_none=True) def start_channels( self, shell: bool = True, iopub: bool = True, stdin: bool = True, hb: bool = True, control: bool = True, ) -> None: """Start the channels on the client.""" self.ioloop_thread = IOLoopThread() self.ioloop_thread.start() if shell: self.shell_channel._inspect = self._check_kernel_info_reply super().start_channels(shell, iopub, stdin, hb, control) def _check_kernel_info_reply(self, msg: Dict[str, Any]) -> None: """This is run in the ioloop thread when the kernel info reply is received""" if msg["msg_type"] == "kernel_info_reply": self._handle_kernel_info_reply(msg) self.shell_channel._inspect = None def stop_channels(self) -> None: """Stop the channels on the client.""" super().stop_channels() if self.ioloop_thread and self.ioloop_thread.is_alive(): self.ioloop_thread.stop() iopub_channel_class = Type(ThreadedZMQSocketChannel) # type:ignore[arg-type] shell_channel_class = Type(ThreadedZMQSocketChannel) # type:ignore[arg-type] stdin_channel_class = Type(ThreadedZMQSocketChannel) # type:ignore[arg-type] hb_channel_class = Type(HBChannel) # type:ignore[arg-type] control_channel_class = Type(ThreadedZMQSocketChannel) # type:ignore[arg-type] def is_alive(self) -> bool: """Is the kernel process still running?""" if self._hb_channel is not None: # We don't have access to the KernelManager, # so we use the heartbeat. return self._hb_channel.is_beating() # no heartbeat and not local, we can't tell if it's running, # so naively return True return True jupyter_client-8.6.2/jupyter_client/utils.py000066400000000000000000000061521462351563100213530ustar00rootroot00000000000000""" utils: - provides utility wrappers to run asynchronous functions in a blocking environment. - vendor functions from ipython_genutils that should be retired at some point. """ from __future__ import annotations import os from typing import Sequence from jupyter_core.utils import ensure_async, run_sync # noqa: F401 # noqa: F401 from .session import utcnow # noqa def _filefind(filename: str, path_dirs: str | Sequence[str] | None = None) -> str: """Find a file by looking through a sequence of paths. This iterates through a sequence of paths looking for a file and returns the full, absolute path of the first occurrence of the file. If no set of path dirs is given, the filename is tested as is, after running through :func:`expandvars` and :func:`expanduser`. Thus a simple call:: filefind('myfile.txt') will find the file in the current working dir, but:: filefind('~/myfile.txt') Will find the file in the users home directory. This function does not automatically try any paths, such as the cwd or the user's home directory. Parameters ---------- filename : str The filename to look for. path_dirs : str, None or sequence of str The sequence of paths to look for the file in. If None, the filename need to be absolute or be in the cwd. If a string, the string is put into a sequence and the searched. If a sequence, walk through each element and join with ``filename``, calling :func:`expandvars` and :func:`expanduser` before testing for existence. Returns ------- Raises :exc:`IOError` or returns absolute path to file. """ # If paths are quoted, abspath gets confused, strip them... filename = filename.strip('"').strip("'") # If the input is an absolute path, just check it exists if os.path.isabs(filename) and os.path.isfile(filename): return filename if path_dirs is None: path_dirs = ("",) elif isinstance(path_dirs, str): path_dirs = (path_dirs,) for path in path_dirs: if path == ".": path = os.getcwd() # noqa testname = _expand_path(os.path.join(path, filename)) if os.path.isfile(testname): return os.path.abspath(testname) msg = f"File {filename!r} does not exist in any of the search paths: {path_dirs!r}" raise OSError(msg) def _expand_path(s: str) -> str: """Expand $VARS and ~names in a string, like a shell :Examples: In [2]: os.environ['FOO']='test' In [3]: expand_path('variable FOO is $FOO') Out[3]: 'variable FOO is test' """ # This is a pretty subtle hack. When expand user is given a UNC path # on Windows (\\server\share$\%username%), os.path.expandvars, removes # the $ to get (\\server\share\%username%). I think it considered $ # alone an empty var. But, we need the $ to remains there (it indicates # a hidden share). if os.name == "nt": s = s.replace("$\\", "IPYTHON_TEMP") s = os.path.expandvars(os.path.expanduser(s)) if os.name == "nt": s = s.replace("IPYTHON_TEMP", "$\\") return s jupyter_client-8.6.2/jupyter_client/win_interrupt.py000066400000000000000000000027541462351563100231300ustar00rootroot00000000000000"""Use a Windows event to interrupt a child process like SIGINT. The child needs to explicitly listen for this - see ipykernel.parentpoller.ParentPollerWindows for a Python implementation. """ import ctypes from typing import Any def create_interrupt_event() -> Any: """Create an interrupt event handle. The parent process should call this to create the interrupt event that is passed to the child process. It should store this handle and use it with ``send_interrupt`` to interrupt the child process. """ # Create a security attributes struct that permits inheritance of the # handle by new processes. # FIXME: We can clean up this mess by requiring pywin32 for IPython. class SECURITY_ATTRIBUTES(ctypes.Structure): # noqa _fields_ = [ ("nLength", ctypes.c_int), ("lpSecurityDescriptor", ctypes.c_void_p), ("bInheritHandle", ctypes.c_int), ] sa = SECURITY_ATTRIBUTES() sa_p = ctypes.pointer(sa) sa.nLength = ctypes.sizeof(SECURITY_ATTRIBUTES) sa.lpSecurityDescriptor = 0 sa.bInheritHandle = 1 return ctypes.windll.kernel32.CreateEventA( # type:ignore[attr-defined] sa_p, False, False, "", # lpEventAttributes # bManualReset # bInitialState ) # lpName def send_interrupt(interrupt_handle: Any) -> None: """Sends an interrupt event using the specified handle.""" ctypes.windll.kernel32.SetEvent(interrupt_handle) # type:ignore[attr-defined] jupyter_client-8.6.2/pyproject.toml000066400000000000000000000153151462351563100175160ustar00rootroot00000000000000[build-system] requires = ["hatchling>=1.5"] build-backend = "hatchling.build" [project] name = "jupyter_client" dynamic = ["version"] description = "Jupyter protocol implementation and client libraries" keywords = [ "Interactive", "Interpreter", "Shell", "Web",] classifiers = [ "Framework :: Jupyter", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: System Administrators", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3" ] requires-python = ">=3.8" dependencies = [ "importlib_metadata>=4.8.3;python_version<\"3.10\"", "jupyter_core>=4.12,!=5.0.*", "python-dateutil>=2.8.2", "pyzmq>=23.0", "tornado>=6.2", "traitlets>=5.3", ] [[project.authors]] name = "Jupyter Development Team" email = "jupyter@googlegroups.com" [project.readme] file = "README.md" content-type = "text/markdown" [project.license] file = "LICENSE" [project.urls] Homepage = "https://jupyter.org" Documentation = "https://jupyter-client.readthedocs.io/" Source = "https://github.com/jupyter/jupyter_client" [project.optional-dependencies] test = [ "coverage", "ipykernel>=6.14", "mypy", "paramiko; sys_platform == 'win32'", "pre-commit", "pytest<8.2.0", "pytest-jupyter[client]>=0.4.1", "pytest-cov", "pytest-timeout", ] docs = [ "ipykernel", "myst-parser", "sphinx>=4", "pydata_sphinx_theme", "sphinxcontrib_github_alt", "sphinxcontrib-spelling", "sphinx-autodoc-typehints", ] [project.scripts] jupyter-kernelspec = "jupyter_client.kernelspecapp:KernelSpecApp.launch_instance" jupyter-run = "jupyter_client.runapp:RunApp.launch_instance" jupyter-kernel = "jupyter_client.kernelapp:main" [project.entry-points."jupyter_client.kernel_provisioners"] local-provisioner = "jupyter_client.provisioning:LocalProvisioner" [tool.hatch.version] path = "jupyter_client/_version.py" validate-bump = false [tool.hatch.envs.docs] features = ["docs"] [tool.hatch.envs.docs.scripts] build = "make -C docs html SPHINXOPTS='-W'" api = "sphinx-apidoc -o docs/api -f -E jupyter_client" [tool.hatch.envs.test] features = ["test"] [tool.hatch.envs.test.scripts] test = "python -m pytest -vv {args}" nowarn = "test -W default {args}" [tool.hatch.envs.cov] features = ["test"] dependencies = ["coverage[toml]", "pytest-cov"] [tool.hatch.envs.cov.scripts] test = "python -m pytest -vv --cov jupyter_client --cov-branch --cov-report term-missing:skip-covered {args}" nowarn = "test -W default {args}" [tool.hatch.envs.typing] dependencies = ["pre-commit"] detached = true [tool.hatch.envs.typing.scripts] test = "pre-commit run --all-files --hook-stage manual mypy" [tool.hatch.envs.lint] dependencies = ["pre-commit"] detached = true [tool.hatch.envs.lint.scripts] build = [ "pre-commit run --all-files ruff", "pre-commit run --all-files ruff-format" ] [tool.pytest.ini_options] minversion = "6.0" xfail_strict = true log_cli_level = "info" addopts = [ "-raXs", "--durations=10", "--color=yes", "--doctest-modules", "--showlocals", "--strict-markers", "--strict-config" ] testpaths = [ "jupyter_client", "tests/" ] timeout = 100 # Restore this setting to debug failures timeout_method = "thread" filterwarnings= [ # Fail on warnings "error", # from python-dateutil "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning", "ignore:datetime.datetime.utcnow:DeprecationWarning", ] [tool.coverage.report] exclude_lines = [ "pragma: no cover", "def __repr__", "if self.debug:", "if settings.DEBUG", "raise AssertionError", "raise NotImplementedError", "if 0:", "if __name__ == .__main__.:", "class .*\bProtocol\\):", "@(abc\\.)?abstractmethod", ] omit = [ "jupyter_client/ssh/forward.py" ] [tool.coverage.run] relative_files = true source = ["jupyter_client"] [tool.mypy] files = "jupyter_client" python_version = "3.8" strict = true disallow_any_generics = false enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] no_implicit_reexport = false pretty = true show_error_context = true warn_return_any = false warn_unreachable = true [tool.ruff] line-length = 100 [tool.ruff.lint] select = [ "A", "B", "C", "DTZ", "E", "EM", "F", "FBT", "I", "ICN", "N", "PLC", "PLE", "PLW", "Q", "RUF", "S", "SIM", "T", "TID", "UP", "W", "YTT", ] ignore = [ # Allow non-abstract empty methods in abstract base classes "B027", # Ignore McCabe complexity "C901", # Allow boolean positional values in function calls, like `dict.get(... True)` "FBT003", # Use of `assert` detected "S101", # Line too long "E501", # Relative imports are banned "TID252", # Boolean ... in function definition "FBT001", "FBT002", # Module level import not at top of file "E402", # A001/A002/A003 .. is shadowing a python builtin "A001", "A002", "A003", # Possible hardcoded password "S105", "S106", # Q000 Single quotes found but double quotes preferred "Q000", # N806 Variable `B` in function should be lowercase "N806", # SIM105 Use `contextlib.suppress(ValueError)` instead of try-except-pass "SIM105", # SIM108 [*] Use ternary operator "SIM108", # S110 `try`-`except`-`pass` detected, consider logging the exception "S110", # PLW0603 Using the global statement to update "PLW0603", # Mutable class attributes should be annotated with `typing.ClassVar` "RUF012", # non-pep585-annotation "UP006", # non-pep604-annotation "UP007", ] unfixable = [ # Don't touch print statements "T201", # Don't touch noqa lines "RUF100", # Imported but unused "F401", ] [tool.ruff.lint.per-file-ignores] # B011 Do not call assert False since python -O removes these calls # F841 local variable 'foo' is assigned to but never used # C408 Unnecessary `dict` call # E402 Module level import not at top of file # T201 `print` found # B007 Loop control variable `i` not used within the loop body. # N802 Function name `assertIn` should be lowercase # EM101 Exception must not use a string literal, assign to variable first # PLR2004 Magic value used in comparison # S603 `subprocess` call: check for execution of untrusted input "tests/*" = ["B011", "F841", "C408", "E402", "T201", "B007", "N802", "EM101", "EM102", "PLR2004", "S603"] # T201 `print` found "*app.py" = ["T201"] # F401 `._version.__version__` imported but unused "jupyter_client/__init__.py" = ["F401"] [tool.interrogate] ignore-init-module=true ignore-private=true ignore-semiprivate=true ignore-property-decorators=true ignore-nested-functions=true ignore-nested-classes=true fail-under=90 exclude = ["docs", "test"] [tool.repo-review] ignore = ["GH102"] jupyter_client-8.6.2/tests/000077500000000000000000000000001462351563100157375ustar00rootroot00000000000000jupyter_client-8.6.2/tests/__init__.py000066400000000000000000000000001462351563100200360ustar00rootroot00000000000000jupyter_client-8.6.2/tests/conftest.py000066400000000000000000000006061462351563100201400ustar00rootroot00000000000000import asyncio import os if os.name == "nt": asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type:ignore import pytest # Must be set before importing from `jupyter_core`. os.environ["JUPYTER_PLATFORM_DIRS"] = "1" pytest_plugins = ["pytest_jupyter", "pytest_jupyter.jupyter_client"] @pytest.fixture(autouse=True) def setup_environ(jp_environ): pass jupyter_client-8.6.2/tests/problemkernel.py000066400000000000000000000023301462351563100211500ustar00rootroot00000000000000"""Test kernel for signalling subprocesses""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os import time from ipykernel.displayhook import ZMQDisplayHook from ipykernel.kernelapp import IPKernelApp from ipykernel.kernelbase import Kernel class ProblemTestKernel(Kernel): """Kernel for testing kernel problems""" implementation = "problemtest" implementation_version = "0.0" banner = "" class ProblemTestApp(IPKernelApp): kernel_class = ProblemTestKernel # type:ignore[assignment] def init_io(self): # Overridden to disable stdout/stderr capture self.displayhook = ZMQDisplayHook(self.session, self.iopub_socket) def init_sockets(self): if os.environ.get("FAIL_ON_START") == "1": # Simulates e.g. a port binding issue (Address already in use) raise RuntimeError("Failed for testing purposes") return super().init_sockets() if __name__ == "__main__": # make startup artificially slow, # so that we exercise client logic for slow-starting kernels startup_delay = int(os.environ.get("STARTUP_DELAY", "2")) time.sleep(startup_delay) ProblemTestApp.launch_instance() jupyter_client-8.6.2/tests/signalkernel.py000066400000000000000000000047661462351563100210040ustar00rootroot00000000000000"""Test kernel for signalling subprocesses""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os import signal import time from subprocess import PIPE, Popen from ipykernel.displayhook import ZMQDisplayHook from ipykernel.kernelapp import IPKernelApp from ipykernel.kernelbase import Kernel class SignalTestKernel(Kernel): """Kernel for testing subprocess signaling""" implementation = "signaltest" implementation_version = "0.0" banner = "" def __init__(self, **kwargs): kwargs.pop("user_ns", None) super().__init__(**kwargs) self.children = [] if os.environ.get("NO_SIGTERM_REPLY", None) == "1": signal.signal(signal.SIGTERM, signal.SIG_IGN) async def shutdown_request(self, stream, ident, parent): if os.environ.get("NO_SHUTDOWN_REPLY") != "1": await super().shutdown_request(stream, ident, parent) def do_execute( self, code, silent, store_history=True, user_expressions=None, allow_stdin=False ): code = code.strip() reply: dict = { "status": "ok", "user_expressions": {}, } if code == "start": child = Popen(["bash", "-i", "-c", "sleep 30"], stderr=PIPE) # noqa self.children.append(child) reply["user_expressions"]["pid"] = self.children[-1].pid elif code == "check": reply["user_expressions"]["poll"] = [child.poll() for child in self.children] elif code == "env": reply["user_expressions"]["env"] = os.getenv("TEST_VARS", "") elif code == "sleep": try: time.sleep(10) except KeyboardInterrupt: reply["user_expressions"]["interrupted"] = True else: reply["user_expressions"]["interrupted"] = False else: reply["status"] = "error" reply["ename"] = "Error" reply["evalue"] = code reply["traceback"] = ["no such command: %s" % code] return reply class SignalTestApp(IPKernelApp): kernel_class = SignalTestKernel # type:ignore[assignment] def init_io(self): # Overridden to disable stdout/stderr capture self.displayhook = ZMQDisplayHook(self.session, self.iopub_socket) if __name__ == "__main__": # make startup artificially slow, # so that we exercise client logic for slow-starting kernels time.sleep(2) SignalTestApp.launch_instance() jupyter_client-8.6.2/tests/test_adapter.py000066400000000000000000000341441462351563100207760ustar00rootroot00000000000000"""Tests for adapting Jupyter msg spec versions""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import copy import json from unittest import TestCase from jupyter_client.adapter import V4toV5, adapt, code_to_line from jupyter_client.session import Session def test_default_version(): s = Session() msg = s.msg("msg_type") msg["header"].pop("version") original = copy.deepcopy(msg) adapted = adapt(original) assert adapted["header"]["version"] == V4toV5.version def test_code_to_line_no_code(): line, pos = code_to_line("", 0) assert line == "" assert pos == 0 class AdapterTest(TestCase): def setUp(self): self.session = Session() def adapt(self, msg, version=None): original = copy.deepcopy(msg) adapted = adapt(msg, version or self.to_version) # type:ignore return original, adapted def check_header(self, msg): pass class V4toV5TestCase(AdapterTest): from_version = 4 to_version = 5 def msg(self, msg_type, content): """Create a v4 msg (same as v5, minus version header)""" msg = self.session.msg(msg_type, content) msg["header"].pop("version") return msg def test_same_version(self): msg = self.msg("execute_result", content={"status": "ok"}) original, adapted = self.adapt(msg, self.from_version) self.assertEqual(original, adapted) def test_no_adapt(self): msg = self.msg("input_reply", {"value": "some text"}) v4, v5 = self.adapt(msg) self.assertEqual(v5["header"]["version"], V4toV5.version) v5["header"].pop("version") self.assertEqual(v4, v5) def test_rename_type(self): for v5_type, v4_type in [ ("execute_result", "pyout"), ("execute_input", "pyin"), ("error", "pyerr"), ]: msg = self.msg(v4_type, {"key": "value"}) v4, v5 = self.adapt(msg) self.assertEqual(v5["header"]["version"], V4toV5.version) self.assertEqual(v5["header"]["msg_type"], v5_type) self.assertEqual(v4["content"], v5["content"]) def test_execute_request(self): msg = self.msg( "execute_request", { "code": "a=5", "silent": False, "user_expressions": {"a": "apple"}, "user_variables": ["b"], }, ) v4, v5 = self.adapt(msg) self.assertEqual(v4["header"]["msg_type"], v5["header"]["msg_type"]) v4c = v4["content"] v5c = v5["content"] self.assertEqual(v5c["user_expressions"], {"a": "apple", "b": "b"}) self.assertNotIn("user_variables", v5c) self.assertEqual(v5c["code"], v4c["code"]) def test_execute_reply(self): msg = self.msg( "execute_reply", { "status": "ok", "execution_count": 7, "user_variables": {"a": 1}, "user_expressions": {"a+a": 2}, "payload": [{"source": "page", "text": "blah"}], }, ) v4, v5 = self.adapt(msg) v5c = v5["content"] self.assertNotIn("user_variables", v5c) self.assertEqual(v5c["user_expressions"], {"a": 1, "a+a": 2}) self.assertEqual(v5c["payload"], [{"source": "page", "data": {"text/plain": "blah"}}]) def test_complete_request(self): msg = self.msg( "complete_request", { "text": "a.is", "line": "foo = a.is", "block": None, "cursor_pos": 10, }, ) v4, v5 = self.adapt(msg) v4c = v4["content"] v5c = v5["content"] for key in ("text", "line", "block"): self.assertNotIn(key, v5c) self.assertEqual(v5c["cursor_pos"], v4c["cursor_pos"]) self.assertEqual(v5c["code"], v4c["line"]) def test_complete_reply(self): msg = self.msg( "complete_reply", { "matched_text": "a.is", "matches": [ "a.isalnum", "a.isalpha", "a.isdigit", "a.islower", ], }, ) v4, v5 = self.adapt(msg) v4c = v4["content"] v5c = v5["content"] self.assertEqual(v5c["matches"], v4c["matches"]) self.assertEqual(v5c["metadata"], {}) self.assertEqual(v5c["cursor_start"], -4) self.assertEqual(v5c["cursor_end"], None) def test_object_info_request(self): msg = self.msg( "object_info_request", { "oname": "foo", "detail_level": 1, }, ) v4, v5 = self.adapt(msg) self.assertEqual(v5["header"]["msg_type"], "inspect_request") v4c = v4["content"] v5c = v5["content"] self.assertEqual(v5c["code"], v4c["oname"]) self.assertEqual(v5c["cursor_pos"], len(v4c["oname"])) self.assertEqual(v5c["detail_level"], v4c["detail_level"]) def test_object_info_reply(self): msg = self.msg( "object_info_reply", { "name": "foo", "found": True, "status": "ok", "definition": "foo(a=5)", "docstring": "the docstring", }, ) v4, v5 = self.adapt(msg) self.assertEqual(v5["header"]["msg_type"], "inspect_reply") v4c = v4["content"] v5c = v5["content"] self.assertEqual(sorted(v5c), ["data", "found", "metadata", "status"]) text = v5c["data"]["text/plain"] self.assertEqual(text, "\n".join([v4c["definition"], v4c["docstring"]])) def test_object_info_reply_not_found(self): msg = self.msg( "object_info_reply", { "name": "foo", "found": False, }, ) v4, v5 = self.adapt(msg) self.assertEqual(v5["header"]["msg_type"], "inspect_reply") v4["content"] v5c = v5["content"] self.assertEqual( v5c, { "status": "ok", "found": False, "data": {}, "metadata": {}, }, ) def test_kernel_info_reply(self): msg = self.msg( "kernel_info_reply", { "language": "python", "language_version": [2, 8, 0], "ipython_version": [1, 2, 3], }, ) v4, v5 = self.adapt(msg) v4["content"] v5c = v5["content"] self.assertEqual( v5c, { "protocol_version": "4.1", "implementation": "ipython", "implementation_version": "1.2.3", "language_info": { "name": "python", "version": "2.8.0", }, "banner": "", }, ) # iopub channel def test_display_data(self): jsondata = dict(a=5) msg = self.msg( "display_data", { "data": { "text/plain": "some text", "application/json": json.dumps(jsondata), }, "metadata": {"text/plain": {"key": "value"}}, }, ) v4, v5 = self.adapt(msg) v4c = v4["content"] v5c = v5["content"] self.assertEqual(v5c["metadata"], v4c["metadata"]) self.assertEqual(v5c["data"]["text/plain"], v4c["data"]["text/plain"]) self.assertEqual(v5c["data"]["application/json"], jsondata) # stdin channel def test_input_request(self): msg = self.msg("input_request", {"prompt": "$>"}) v4, v5 = self.adapt(msg) self.assertEqual(v5["content"]["prompt"], v4["content"]["prompt"]) self.assertFalse(v5["content"]["password"]) class V5toV4TestCase(AdapterTest): from_version = 5 to_version = 4 def msg(self, msg_type, content): return self.session.msg(msg_type, content) def test_same_version(self): msg = self.msg("execute_result", content={"status": "ok"}) original, adapted = self.adapt(msg, self.from_version) self.assertEqual(original, adapted) def test_no_adapt(self): msg = self.msg("input_reply", {"value": "some text"}) v5, v4 = self.adapt(msg) self.assertNotIn("version", v4["header"]) v5["header"].pop("version") self.assertEqual(v4, v5) def test_rename_type(self): for v5_type, v4_type in [ ("execute_result", "pyout"), ("execute_input", "pyin"), ("error", "pyerr"), ]: msg = self.msg(v5_type, {"key": "value"}) v5, v4 = self.adapt(msg) self.assertEqual(v4["header"]["msg_type"], v4_type) assert "version" not in v4["header"] self.assertEqual(v4["content"], v5["content"]) def test_execute_request(self): msg = self.msg( "execute_request", { "code": "a=5", "silent": False, "user_expressions": {"a": "apple"}, }, ) v5, v4 = self.adapt(msg) self.assertEqual(v4["header"]["msg_type"], v5["header"]["msg_type"]) v4c = v4["content"] v5c = v5["content"] self.assertEqual(v4c["user_variables"], []) self.assertEqual(v5c["code"], v4c["code"]) def test_complete_request(self): msg = self.msg( "complete_request", { "code": "def foo():\n a.is\nfoo()", "cursor_pos": 19, }, ) v5, v4 = self.adapt(msg) v4c = v4["content"] v5c = v5["content"] self.assertNotIn("code", v4c) self.assertEqual(v4c["line"], v5c["code"].splitlines(True)[1]) self.assertEqual(v4c["cursor_pos"], 8) self.assertEqual(v4c["text"], "") self.assertEqual(v4c["block"], None) def test_complete_reply(self): msg = self.msg( "complete_reply", { "cursor_start": 10, "cursor_end": 14, "matches": [ "a.isalnum", "a.isalpha", "a.isdigit", "a.islower", ], "metadata": {}, }, ) v5, v4 = self.adapt(msg) v4c = v4["content"] v5c = v5["content"] self.assertEqual(v4c["matched_text"], "a.is") self.assertEqual(v4c["matches"], v5c["matches"]) def test_inspect_request(self): msg = self.msg( "inspect_request", { "code": "def foo():\n apple\nbar()", "cursor_pos": 18, "detail_level": 1, }, ) v5, v4 = self.adapt(msg) self.assertEqual(v4["header"]["msg_type"], "object_info_request") v4c = v4["content"] v5c = v5["content"] self.assertEqual(v4c["oname"], "apple") self.assertEqual(v5c["detail_level"], v4c["detail_level"]) def test_inspect_request_token(self): line = "something(range(10), kwarg=smth) ; xxx.xxx.xxx( firstarg, rand(234,23), kwarg1=2," msg = self.msg( "inspect_request", { "code": line, "cursor_pos": len(line) - 1, "detail_level": 1, }, ) v5, v4 = self.adapt(msg) self.assertEqual(v4["header"]["msg_type"], "object_info_request") v4c = v4["content"] v5c = v5["content"] self.assertEqual(v4c["oname"], "xxx.xxx.xxx") self.assertEqual(v5c["detail_level"], v4c["detail_level"]) def test_inspect_reply(self): msg = self.msg( "inspect_reply", { "name": "foo", "found": True, "data": {"text/plain": "some text"}, "metadata": {}, }, ) v5, v4 = self.adapt(msg) self.assertEqual(v4["header"]["msg_type"], "object_info_reply") v4c = v4["content"] v5["content"] self.assertEqual(sorted(v4c), ["found", "oname"]) self.assertEqual(v4c["found"], False) def test_kernel_info_reply(self): msg = self.msg( "kernel_info_reply", { "protocol_version": "5.0", "implementation": "ipython", "implementation_version": "1.2.3", "language_info": { "name": "python", "version": "2.8.0", "mimetype": "text/x-python", }, "banner": "the banner", }, ) v5, v4 = self.adapt(msg) v4c = v4["content"] v5c = v5["content"] v5c["language_info"] self.assertEqual( v4c, { "protocol_version": [5, 0], "language": "python", "language_version": [2, 8, 0], "ipython_version": [1, 2, 3], }, ) # iopub channel def test_display_data(self): jsondata = dict(a=5) msg = self.msg( "display_data", { "data": { "text/plain": "some text", "application/json": jsondata, }, "metadata": {"text/plain": {"key": "value"}}, }, ) v5, v4 = self.adapt(msg) v4c = v4["content"] v5c = v5["content"] self.assertEqual(v5c["metadata"], v4c["metadata"]) self.assertEqual(v5c["data"]["text/plain"], v4c["data"]["text/plain"]) self.assertEqual(v4c["data"]["application/json"], json.dumps(jsondata)) # stdin channel def test_input_request(self): msg = self.msg("input_request", {"prompt": "$>", "password": True}) v5, v4 = self.adapt(msg) self.assertEqual(v5["content"]["prompt"], v4["content"]["prompt"]) self.assertNotIn("password", v4["content"]) jupyter_client-8.6.2/tests/test_client.py000066400000000000000000000234021462351563100206270ustar00rootroot00000000000000"""Tests for the KernelClient""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os import platform import sys from threading import Event from unittest import TestCase, mock import pytest from IPython.utils.capture import capture_output from traitlets import DottedObjectName, Type from jupyter_client.client import validate_string_dict from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel from jupyter_client.manager import KernelManager, start_new_async_kernel, start_new_kernel from jupyter_client.threaded import ThreadedKernelClient, ThreadedZMQSocketChannel TIMEOUT = 30 pjoin = os.path.join class TestKernelClient(TestCase): def setUp(self): try: KernelSpecManager().get_kernel_spec("echo") except NoSuchKernel: pytest.skip() self.km, self.kc = start_new_kernel(kernel_name="echo") def tearDown(self): self.km.shutdown_kernel() self.kc.stop_channels() return super().tearDown() def test_execute_interactive(self): kc = self.kc with capture_output() as io: reply = kc.execute_interactive("print('hello')", timeout=TIMEOUT) assert "hello" in io.stdout assert reply["content"]["status"] == "ok" def _check_reply(self, reply_type, reply): self.assertIsInstance(reply, dict) self.assertEqual(reply["header"]["msg_type"], reply_type + "_reply") self.assertEqual(reply["parent_header"]["msg_type"], reply_type + "_request") def test_history(self): kc = self.kc msg_id = kc.history(session=0) self.assertIsInstance(msg_id, str) reply = kc.history(session=0, reply=True, timeout=TIMEOUT) self._check_reply("history", reply) def test_inspect(self): kc = self.kc msg_id = kc.inspect("who cares") self.assertIsInstance(msg_id, str) reply = kc.inspect("code", reply=True, timeout=TIMEOUT) self._check_reply("inspect", reply) def test_complete(self): kc = self.kc msg_id = kc.complete("who cares") self.assertIsInstance(msg_id, str) reply = kc.complete("code", reply=True, timeout=TIMEOUT) self._check_reply("complete", reply) def test_kernel_info(self): kc = self.kc msg_id = kc.kernel_info() self.assertIsInstance(msg_id, str) reply = kc.kernel_info(reply=True, timeout=TIMEOUT) self._check_reply("kernel_info", reply) def test_comm_info(self): kc = self.kc msg_id = kc.comm_info() self.assertIsInstance(msg_id, str) reply = kc.comm_info(reply=True, timeout=TIMEOUT) self._check_reply("comm_info", reply) def test_shutdown(self): kc = self.kc reply = kc.shutdown(reply=True, timeout=TIMEOUT) self._check_reply("shutdown", reply) def test_shutdown_id(self): kc = self.kc msg_id = kc.shutdown() self.assertIsInstance(msg_id, str) @pytest.fixture def kc(jp_asyncio_loop): try: KernelSpecManager().get_kernel_spec("echo") except NoSuchKernel: pytest.skip() async def start(): return await start_new_async_kernel(kernel_name="echo") km, kc = jp_asyncio_loop.run_until_complete(start()) yield kc async def stop(): await km.shutdown_kernel() jp_asyncio_loop.run_until_complete(stop()) kc.stop_channels() class TestAsyncKernelClient: async def test_execute_interactive(self, kc): reply = await kc.execute_interactive("hello", timeout=TIMEOUT) assert reply["content"]["status"] == "ok" def _check_reply(self, reply_type, reply): assert isinstance(reply, dict) assert reply["header"]["msg_type"] == reply_type + "_reply" assert reply["parent_header"]["msg_type"] == reply_type + "_request" @pytest.mark.skipif( sys.platform != "linux" or platform.python_implementation().lower() == "pypy", reason="only works with cpython on ubuntu in ci", ) async def test_input_request(self, kc): with mock.patch("builtins.input", return_value="test\n"): reply = await kc.execute_interactive("a = input()", timeout=TIMEOUT) assert reply["content"]["status"] == "ok" async def test_output_hook(self, kc): called = False def output_hook(msg): nonlocal called if msg["header"]["msg_type"] == "stream": called = True reply = await kc.execute_interactive( "print('hello')", timeout=TIMEOUT, output_hook=output_hook ) assert reply["content"]["status"] == "ok" assert called async def test_history(self, kc): msg_id = kc.history(session=0) assert isinstance(msg_id, str) reply = await kc.history(session=0, reply=True, timeout=TIMEOUT) self._check_reply("history", reply) async def test_inspect(self, kc): msg_id = kc.inspect("who cares") assert isinstance(msg_id, str) reply = await kc.inspect("code", reply=True, timeout=TIMEOUT) self._check_reply("inspect", reply) async def test_complete(self, kc): msg_id = kc.complete("who cares") assert isinstance(msg_id, str) reply = await kc.complete("code", reply=True, timeout=TIMEOUT) self._check_reply("complete", reply) async def test_is_complete(self, kc): msg_id = kc.is_complete("who cares") assert isinstance(msg_id, str) reply = await kc.is_complete("code", reply=True, timeout=TIMEOUT) self._check_reply("is_complete", reply) async def test_kernel_info(self, kc): msg_id = kc.kernel_info() assert isinstance(msg_id, str) reply = await kc.kernel_info(reply=True, timeout=TIMEOUT) self._check_reply("kernel_info", reply) async def test_comm_info(self, kc): msg_id = kc.comm_info() assert isinstance(msg_id, str) reply = await kc.comm_info(reply=True, timeout=TIMEOUT) self._check_reply("comm_info", reply) async def test_shutdown(self, kc): reply = await kc.shutdown(reply=True, timeout=TIMEOUT) self._check_reply("shutdown", reply) async def test_shutdown_id(self, kc): msg_id = kc.shutdown() assert isinstance(msg_id, str) class ThreadedKernelManager(KernelManager): client_class = DottedObjectName("tests.test_client.CustomThreadedKernelClient") class CustomThreadedZMQSocketChannel(ThreadedZMQSocketChannel): last_msg = None def __init__(self, *args, **kwargs): self.msg_recv = Event() super().__init__(*args, **kwargs) def call_handlers(self, msg): self.last_msg = msg self.msg_recv.set() class CustomThreadedKernelClient(ThreadedKernelClient): iopub_channel_class = Type(CustomThreadedZMQSocketChannel) # type:ignore[arg-type] shell_channel_class = Type(CustomThreadedZMQSocketChannel) # type:ignore[arg-type] stdin_channel_class = Type(CustomThreadedZMQSocketChannel) # type:ignore[arg-type] control_channel_class = Type(CustomThreadedZMQSocketChannel) # type:ignore[arg-type] class TestThreadedKernelClient(TestKernelClient): def setUp(self): try: KernelSpecManager().get_kernel_spec("echo") except NoSuchKernel: pytest.skip() self.km = km = ThreadedKernelManager(kernel_name="echo") km.start_kernel() self.kc = kc = km.client() kc.start_channels() def tearDown(self): self.km.shutdown_kernel() self.kc.stop_channels() def _check_reply(self, reply_type, reply): self.assertIsInstance(reply, dict) self.assertEqual(reply["header"]["msg_type"], reply_type + "_reply") self.assertEqual(reply["parent_header"]["msg_type"], reply_type + "_request") def test_execute_interactive(self): pytest.skip("Not supported") def test_history(self): kc = self.kc msg_id = kc.history(session=0) self.assertIsInstance(msg_id, str) kc.history(session=0) kc.shell_channel.msg_recv.wait() reply = kc.shell_channel.last_msg self._check_reply("history", reply) def test_inspect(self): kc = self.kc msg_id = kc.inspect("who cares") self.assertIsInstance(msg_id, str) kc.inspect("code") kc.shell_channel.msg_recv.wait() reply = kc.shell_channel.last_msg self._check_reply("inspect", reply) def test_complete(self): kc = self.kc msg_id = kc.complete("who cares") self.assertIsInstance(msg_id, str) kc.complete("code") kc.shell_channel.msg_recv.wait() reply = kc.shell_channel.last_msg self._check_reply("complete", reply) def test_kernel_info(self): kc = self.kc msg_id = kc.kernel_info() self.assertIsInstance(msg_id, str) kc.kernel_info() kc.shell_channel.msg_recv.wait() reply = kc.shell_channel.last_msg self._check_reply("kernel_info", reply) def test_comm_info(self): kc = self.kc msg_id = kc.comm_info() self.assertIsInstance(msg_id, str) kc.shell_channel.msg_recv.wait() reply = kc.shell_channel.last_msg self._check_reply("comm_info", reply) def test_shutdown(self): kc = self.kc kc.shutdown() kc.control_channel.msg_recv.wait() reply = kc.control_channel.last_msg self._check_reply("shutdown", reply) def test_shutdown_id(self): kc = self.kc msg_id = kc.shutdown() self.assertIsInstance(msg_id, str) def test_validate_string_dict(): with pytest.raises(ValueError): validate_string_dict(dict(a=1)) # type:ignore with pytest.raises(ValueError): validate_string_dict({1: "a"}) # type:ignore jupyter_client-8.6.2/tests/test_connect.py000066400000000000000000000222521462351563100210040ustar00rootroot00000000000000"""Tests for kernel connection utilities""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import json import os from tempfile import TemporaryDirectory import pytest from jupyter_core.application import JupyterApp from jupyter_core.paths import jupyter_runtime_dir from jupyter_client import KernelClient, KernelManager, connect from jupyter_client.consoleapp import JupyterConsoleApp from jupyter_client.session import Session class TemporaryWorkingDirectory(TemporaryDirectory): """ Creates a temporary directory and sets the cwd to that directory. Automatically reverts to previous cwd upon cleanup. Usage example: with TemporaryWorkingDirectory() as tmpdir: ... """ def __enter__(self): self.old_wd = os.getcwd() os.chdir(self.name) return super().__enter__() def __exit__(self, exc, value, tb): os.chdir(self.old_wd) return super().__exit__(exc, value, tb) class DummyConsoleApp(JupyterApp, JupyterConsoleApp): # type:ignore def initialize(self, argv=None): JupyterApp.initialize(self, argv=argv or []) self.init_connection_file() class DummyConfigurable(connect.ConnectionFileMixin): def initialize(self): pass sample_info: dict = dict( ip="1.2.3.4", transport="ipc", shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5, key=b"abc123", signature_scheme="hmac-md5", kernel_name="python", ) sample_info_kn: dict = dict( ip="1.2.3.4", transport="ipc", shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5, key=b"abc123", signature_scheme="hmac-md5", kernel_name="test", ) def test_write_connection_file(): with TemporaryDirectory() as d: cf = os.path.join(d, "kernel.json") connect.write_connection_file(cf, **sample_info) assert os.path.exists(cf) with open(cf) as f: info = json.load(f) info["key"] = info["key"].encode() assert info == sample_info def test_load_connection_file_session(): """test load_connection_file() after""" session = Session() app = DummyConsoleApp(session=Session()) app.initialize(argv=[]) session = app.session with TemporaryDirectory() as d: cf = os.path.join(d, "kernel.json") connect.write_connection_file(cf, **sample_info) app.connection_file = cf app.load_connection_file() assert session.key == sample_info["key"] assert session.signature_scheme == sample_info["signature_scheme"] def test_load_connection_file_session_with_kn(): """test load_connection_file() after""" session = Session() app = DummyConsoleApp(session=Session()) app.initialize(argv=[]) session = app.session with TemporaryDirectory() as d: cf = os.path.join(d, "kernel.json") connect.write_connection_file(cf, **sample_info_kn) app.connection_file = cf app.load_connection_file() assert session.key == sample_info_kn["key"] assert session.signature_scheme == sample_info_kn["signature_scheme"] def test_app_load_connection_file(): """test `ipython console --existing` loads a connection file""" with TemporaryDirectory() as d: cf = os.path.join(d, "kernel.json") connect.write_connection_file(cf, **sample_info) app = DummyConsoleApp(connection_file=cf) app.initialize(argv=[]) for attr, expected in sample_info.items(): if attr in ("key", "signature_scheme"): continue value = getattr(app, attr) assert value == expected, f"app.{attr} = {value} != {expected}" def test_load_connection_info(): client = KernelClient() info: dict = { "control_port": 53702, "hb_port": 53705, "iopub_port": 53703, "ip": "0.0.0.0", # noqa "key": "secret", "shell_port": 53700, "signature_scheme": "hmac-sha256", "stdin_port": 53701, "transport": "tcp", } client.load_connection_info(info) assert client.control_port == info["control_port"] assert client.session.key.decode("ascii") == info["key"] assert client.ip == info["ip"] def test_find_connection_file(): with TemporaryDirectory() as d: cf = "kernel.json" app = DummyConsoleApp(runtime_dir=d, connection_file=cf) app.initialize() security_dir = app.runtime_dir profile_cf = os.path.join(security_dir, cf) with open(profile_cf, "w") as f: f.write("{}") for query in ( "kernel.json", "kern*", "*ernel*", "k*", ): assert connect.find_connection_file(query, path=security_dir) == profile_cf def test_find_connection_file_local(): with TemporaryWorkingDirectory(): cf = "test.json" abs_cf = os.path.abspath(cf) with open(cf, "w") as f: f.write("{}") for query in ( "test.json", "test", abs_cf, os.path.join(".", "test.json"), ): assert connect.find_connection_file(query, path=[".", jupyter_runtime_dir()]) == abs_cf def test_find_connection_file_relative(): with TemporaryWorkingDirectory(): cf = "test.json" os.mkdir("subdir") cf = os.path.join("subdir", "test.json") abs_cf = os.path.abspath(cf) with open(cf, "w") as f: f.write("{}") for query in ( os.path.join(".", "subdir", "test.json"), os.path.join("subdir", "test.json"), abs_cf, ): assert connect.find_connection_file(query, path=[".", jupyter_runtime_dir()]) == abs_cf def test_find_connection_file_abspath(): with TemporaryDirectory(): cf = "absolute.json" abs_cf = os.path.abspath(cf) with open(cf, "w") as f: f.write("{}") assert connect.find_connection_file(abs_cf, path=jupyter_runtime_dir()) == abs_cf os.remove(abs_cf) def test_mixin_record_random_ports(): with TemporaryDirectory() as d: dc = DummyConfigurable(data_dir=d, kernel_name="via-tcp", transport="tcp") dc.write_connection_file() assert dc._connection_file_written assert os.path.exists(dc.connection_file) assert dc._random_port_names == connect.port_names def test_mixin_cleanup_random_ports(): with TemporaryDirectory() as d: dc = DummyConfigurable(data_dir=d, kernel_name="via-tcp", transport="tcp") dc.write_connection_file() filename = dc.connection_file dc.cleanup_random_ports() assert not os.path.exists(filename) for name in dc._random_port_names: # type:ignore assert getattr(dc, name) == 0 param_values = [ (True, True), (True, False), (False, True), (False, False), ] @pytest.mark.parametrize("file_exists, km_matches", param_values) def test_reconcile_connection_info(file_exists, km_matches): expected_info = sample_info mismatched_info = sample_info.copy() mismatched_info["key"] = b"def456" mismatched_info["shell_port"] = expected_info["shell_port"] + 42 mismatched_info["control_port"] = expected_info["control_port"] + 42 with TemporaryDirectory() as connection_dir: cf = os.path.join(connection_dir, "kernel.json") km = KernelManager() km.connection_file = cf if file_exists: _, info = connect.write_connection_file(cf, **expected_info) # set 'key' back to bytes info["key"] = info["key"].encode() # type:ignore if km_matches: # Let this be the case where the connection file exists, and the KM has matching # values prior to reconciliation. This is the LocalProvisioner case. provisioner_info = info km.load_connection_info(provisioner_info) else: # Let this be the case where the connection file exists, and the KM has those values # that differ from the ones returned by the provisioner. This is the restart-with- # changed-ports case (typical for remote provisioners). km.load_connection_info(expected_info) provisioner_info = mismatched_info else: # connection file does not exist if km_matches: # Let this be the case where the connection file does not exist, NOR does the KM # have any values of its own and reconciliation sets those values. This is the # non-LocalProvisioner case. provisioner_info = expected_info else: # Let this be the case where the connection file does not exist, yet the KM # has values that do not match those returned from the provisioner. This case # is probably not practical and is equivalent to the True, False case. km.load_connection_info(expected_info) provisioner_info = mismatched_info km._reconcile_connection_info(provisioner_info) km_info = km.get_connection_info() assert km._equal_connections(km_info, provisioner_info) jupyter_client-8.6.2/tests/test_consoleapp.py000066400000000000000000000020671462351563100215200ustar00rootroot00000000000000"""Tests for the JupyterConsoleApp""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os import pytest from jupyter_core.application import JupyterApp from jupyter_client.consoleapp import JupyterConsoleApp from jupyter_client.manager import start_new_kernel class MockConsoleApp(JupyterConsoleApp, JupyterApp): # type:ignore pass def test_console_app_no_existing(): app = MockConsoleApp() app.initialize([]) def test_console_app_existing(tmp_path): km, kc = start_new_kernel() cf = kc.connection_file app = MockConsoleApp(connection_file=cf, existing=cf) app.initialize([]) kc.stop_channels() km.shutdown_kernel() def test_console_app_ssh(tmp_path): km, kc = start_new_kernel() cf = kc.connection_file os.chdir(tmp_path) app = MockConsoleApp( connection_file=cf, existing=cf, sshserver="does_not_exist", sshkey="test_console_app" ) with pytest.raises(SystemExit): app.initialize([]) kc.stop_channels() km.shutdown_kernel() jupyter_client-8.6.2/tests/test_jsonutil.py000066400000000000000000000134461462351563100212270ustar00rootroot00000000000000"""Test suite for our JSON utilities.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import datetime import json import numbers from datetime import timedelta from unittest import mock import pytest from dateutil.tz import tzlocal, tzoffset from jupyter_client import jsonutil from jupyter_client.session import utcnow REFERENCE_DATETIME = datetime.datetime(2013, 7, 3, 16, 34, 52, 249482, tzlocal()) class MyInt: def __int__(self): return 389 numbers.Integral.register(MyInt) class MyFloat: def __float__(self): return 3.14 numbers.Real.register(MyFloat) def test_parse_date_invalid(): assert jsonutil.parse_date(None) is None assert jsonutil.parse_date("") == "" assert jsonutil.parse_date("invalid-date") == "invalid-date" def test_parse_date_valid(): ref = REFERENCE_DATETIME timestamp = "2013-07-03T16:34:52.249482Z" parsed = jsonutil.parse_date(timestamp) assert isinstance(parsed, datetime.datetime) def test_parse_date_from_naive(): ref = REFERENCE_DATETIME timestamp = "2013-07-03T16:34:52.249482" with pytest.deprecated_call(match="Interpreting naive datetime as local"): parsed = jsonutil.parse_date(timestamp) assert isinstance(parsed, datetime.datetime) assert parsed.tzinfo is not None assert parsed.tzinfo.utcoffset(ref) == tzlocal().utcoffset(ref) assert parsed == ref def test_extract_date_from_naive(): ref = REFERENCE_DATETIME timestamp = "2013-07-03T16:34:52.249482" with pytest.deprecated_call(match="Interpreting naive datetime as local"): extracted = jsonutil.extract_dates(timestamp) assert isinstance(extracted, datetime.datetime) assert extracted.tzinfo is not None assert extracted.tzinfo.utcoffset(ref) == tzlocal().utcoffset(ref) assert extracted == ref def test_extract_dates_from_str(): ref = REFERENCE_DATETIME timestamp = "2013-07-03T16:34:52.249482Z" extracted = jsonutil.extract_dates(timestamp) assert isinstance(extracted, datetime.datetime) assert extracted.tzinfo is not None assert extracted.tzinfo.utcoffset(ref) == timedelta(0) def test_extract_dates_from_list(): ref = REFERENCE_DATETIME timestamps = [ "2013-07-03T16:34:52.249482Z", "2013-07-03T16:34:52.249482-0800", "2013-07-03T16:34:52.249482+0800", "2013-07-03T16:34:52.249482-08:00", "2013-07-03T16:34:52.249482+08:00", ] extracted = jsonutil.extract_dates(timestamps) for dt in extracted: assert isinstance(dt, datetime.datetime) assert dt.tzinfo is not None assert extracted[0].tzinfo.utcoffset(ref) == timedelta(0) assert extracted[1].tzinfo.utcoffset(ref) == timedelta(hours=-8) assert extracted[2].tzinfo.utcoffset(ref) == timedelta(hours=8) assert extracted[3].tzinfo.utcoffset(ref) == timedelta(hours=-8) assert extracted[4].tzinfo.utcoffset(ref) == timedelta(hours=8) def test_extract_dates_from_dict(): ref = REFERENCE_DATETIME timestamps = { 0: "2013-07-03T16:34:52.249482Z", 1: "2013-07-03T16:34:52.249482-0800", 2: "2013-07-03T16:34:52.249482+0800", 3: "2013-07-03T16:34:52.249482-08:00", 4: "2013-07-03T16:34:52.249482+08:00", } extracted = jsonutil.extract_dates(timestamps) for k in extracted: dt = extracted[k] assert isinstance(dt, datetime.datetime) assert dt.tzinfo is not None assert extracted[0].tzinfo.utcoffset(ref) == timedelta(0) assert extracted[1].tzinfo.utcoffset(ref) == timedelta(hours=-8) assert extracted[2].tzinfo.utcoffset(ref) == timedelta(hours=8) assert extracted[3].tzinfo.utcoffset(ref) == timedelta(hours=-8) assert extracted[4].tzinfo.utcoffset(ref) == timedelta(hours=8) def test_parse_ms_precision(): base = "2013-07-03T16:34:52" digits = "1234567890" parsed = jsonutil.parse_date(base + "Z") assert isinstance(parsed, datetime.datetime) for i in range(len(digits)): ts = base + "." + digits[:i] parsed = jsonutil.parse_date(ts + "Z") if i >= 1 and i <= 6: assert isinstance(parsed, datetime.datetime) else: assert isinstance(parsed, str) def test_json_default_date(): naive = datetime.datetime.now() # noqa local = tzoffset("Local", -8 * 3600) other = tzoffset("Other", 2 * 3600) data = dict(naive=naive, utc=utcnow(), withtz=naive.replace(tzinfo=other)) with mock.patch.object(jsonutil, "tzlocal", lambda: local): # noqa with pytest.deprecated_call(match="Please add timezone info"): jsondata = json.dumps(data, default=jsonutil.json_default) assert "Z" in jsondata assert jsondata.count("Z") == 1 extracted = jsonutil.extract_dates(json.loads(jsondata)) for dt in extracted.values(): assert isinstance(dt, datetime.datetime) assert dt.tzinfo is not None def test_json_default(): # list of input/expected output. Use None for the expected output if it # can be the same as the input. pairs: list = [ (1, None), # start with scalars (1.123, None), (1.0, None), ("a", None), (True, None), (False, None), (None, None), ({"key": b"\xFF"}, {"key": "/w=="}), # Containers ([1, 2], None), ((1, 2), [1, 2]), ({1, 2}, [1, 2]), (dict(x=1), None), ({"x": 1, "y": [1, 2, 3], "1": "int"}, None), # More exotic objects ((x for x in range(3)), [0, 1, 2]), (iter([1, 2]), [1, 2]), (MyFloat(), 3.14), (MyInt(), 389), ] for val, jval in pairs: if jval is None: jval = val # noqa out = json.loads(json.dumps(val, default=jsonutil.json_default)) # validate our cleanup assert out == jval jupyter_client-8.6.2/tests/test_kernelapp.py000066400000000000000000000033341462351563100213340ustar00rootroot00000000000000import os import shutil import sys import time from subprocess import PIPE, Popen from tempfile import mkdtemp def _launch(extra_env): env = os.environ.copy() env.update(extra_env) return Popen( [sys.executable, "-c", "from jupyter_client.kernelapp import main; main()"], env=env, stderr=PIPE, ) WAIT_TIME = 10 POLL_FREQ = 10 def test_kernelapp_lifecycle(): # Check that 'jupyter kernel' starts and terminates OK. runtime_dir = mkdtemp() startup_dir = mkdtemp() started = os.path.join(startup_dir, "started") try: p = _launch( { "JUPYTER_RUNTIME_DIR": runtime_dir, "JUPYTER_CLIENT_TEST_RECORD_STARTUP_PRIVATE": started, } ) # Wait for start for _ in range(WAIT_TIME * POLL_FREQ): if os.path.isfile(started): break time.sleep(1 / POLL_FREQ) else: raise AssertionError(f"No started file created in {WAIT_TIME} seconds") # Connection file should be there by now for _ in range(WAIT_TIME * POLL_FREQ): files = os.listdir(runtime_dir) if files: break time.sleep(1 / POLL_FREQ) else: raise AssertionError(f"No connection file created in {WAIT_TIME} seconds") assert len(files) == 1 cf = files[0] assert cf.startswith("kernel") assert cf.endswith(".json") # Send SIGTERM to shut down time.sleep(1) p.terminate() _, stderr = p.communicate(timeout=WAIT_TIME) assert cf in stderr.decode("utf-8", "replace") finally: shutil.rmtree(runtime_dir) shutil.rmtree(startup_dir) jupyter_client-8.6.2/tests/test_kernelmanager.py000066400000000000000000000470151462351563100221720ustar00rootroot00000000000000"""Tests for the KernelManager""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import asyncio import concurrent.futures import json import os import signal import sys import time from subprocess import PIPE import pytest from jupyter_core import paths from traitlets.config.loader import Config from jupyter_client import AsyncKernelManager, KernelManager from jupyter_client.manager import _ShutdownStatus, start_new_async_kernel, start_new_kernel from .utils import AsyncKMSubclass, SyncKMSubclass pjoin = os.path.join TIMEOUT = 60 @pytest.fixture(params=["tcp", "ipc"]) def transport(request): if sys.platform == "win32" and request.param == "ipc": # pytest.skip("Transport 'ipc' not supported on Windows.") return request.param @pytest.fixture def config(transport): c = Config() c.KernelManager.transport = transport if transport == "ipc": c.KernelManager.ip = "test" return c def _install_kernel(name="signaltest", extra_env=None): if extra_env is None: extra_env = {} kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", name) os.makedirs(kernel_dir, exist_ok=True) with open(pjoin(kernel_dir, "kernel.json"), "w") as f: f.write( json.dumps( { "argv": [ sys.executable, "-m", "tests.signalkernel", "-f", "{connection_file}", ], "display_name": "Signal Test Kernel", "env": {"TEST_VARS": "${TEST_VARS}:test_var_2", **extra_env}, } ) ) @pytest.fixture def install_kernel(): return _install_kernel() def install_kernel_dont_shutdown(): _install_kernel("signaltest-no-shutdown", {"NO_SHUTDOWN_REPLY": "1"}) def install_kernel_dont_terminate(): return _install_kernel( "signaltest-no-terminate", {"NO_SHUTDOWN_REPLY": "1", "NO_SIGTERM_REPLY": "1"} ) @pytest.fixture def km(config): km = KernelManager(config=config) return km @pytest.fixture def km_subclass(config): km = SyncKMSubclass(config=config) return km @pytest.fixture(params=[AsyncKernelManager, AsyncKMSubclass]) def async_km(request, config): km = request.param(config=config) return km @pytest.fixture def async_km_subclass(config): km = AsyncKMSubclass(config=config) return km class TestKernelManagerShutDownGracefully: parameters = ( "name, install, expected", [ ("signaltest", _install_kernel, _ShutdownStatus.ShutdownRequest), ( "signaltest-no-shutdown", install_kernel_dont_shutdown, _ShutdownStatus.SigtermRequest, ), ( "signaltest-no-terminate", install_kernel_dont_terminate, _ShutdownStatus.SigkillRequest, ), ], ) @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals") @pytest.mark.parametrize(*parameters) def test_signal_kernel_subprocesses(self, name, install, expected): # ipykernel doesn't support 3.6 and this test uses async shutdown_request if expected == _ShutdownStatus.ShutdownRequest and sys.version_info < (3, 7): pytest.skip() install() km, kc = start_new_kernel(kernel_name=name) assert km._shutdown_status == _ShutdownStatus.Unset assert km.is_alive() # kc.execute("1") kc.stop_channels() km.shutdown_kernel() if expected == _ShutdownStatus.ShutdownRequest: expected = [expected, _ShutdownStatus.SigtermRequest] else: expected = [expected] assert km._shutdown_status in expected @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals") @pytest.mark.parametrize(*parameters) async def test_async_signal_kernel_subprocesses(self, name, install, expected): install() km, kc = await start_new_async_kernel(kernel_name=name) assert km._shutdown_status == _ShutdownStatus.Unset assert await km.is_alive() # kc.execute("1") kc.stop_channels() await km.shutdown_kernel() if expected == _ShutdownStatus.ShutdownRequest: expected = [expected, _ShutdownStatus.SigtermRequest] else: expected = [expected] assert km._shutdown_status in expected class TestKernelManager: def test_lifecycle(self, km): km.start_kernel(stdout=PIPE, stderr=PIPE) kc = km.client() assert km.is_alive() is_done = km.ready.done() assert is_done km.restart_kernel(now=True) assert km.is_alive() km.interrupt_kernel() assert isinstance(km, KernelManager) kc.stop_channels() km.shutdown_kernel(now=True) assert km.context.closed def test_get_connect_info(self, km): cinfo = km.get_connection_info() keys = sorted(cinfo.keys()) expected = sorted( [ "ip", "transport", "hb_port", "shell_port", "stdin_port", "iopub_port", "control_port", "key", "signature_scheme", ] ) assert keys == expected @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals") async def test_signal_kernel_subprocesses(self, install_kernel, jp_start_kernel): km, kc = await jp_start_kernel("signaltest") async def execute(cmd): request_id = kc.execute(cmd) while True: reply = await kc.get_shell_msg(TIMEOUT) if reply["parent_header"]["msg_id"] == request_id: break content = reply["content"] assert content["status"] == "ok" return content N = 5 for i in range(N): await execute("start") time.sleep(1) # make sure subprocs stay up reply = await execute("check") assert reply["user_expressions"]["poll"] == [None] * N # start a job on the kernel to be interrupted kc.execute("sleep") time.sleep(1) # ensure sleep message has been handled before we interrupt await km.interrupt_kernel() reply = await kc.get_shell_msg(TIMEOUT) content = reply["content"] assert content["status"] == "ok" assert content["user_expressions"]["interrupted"] # wait up to 10s for subprocesses to handle signal for i in range(100): reply = await execute("check") if reply["user_expressions"]["poll"] != [-signal.SIGINT] * N: time.sleep(0.1) else: break # verify that subprocesses were interrupted assert reply["user_expressions"]["poll"] == [-signal.SIGINT] * N async def test_start_new_kernel(self, install_kernel, jp_start_kernel): km, kc = await jp_start_kernel("signaltest") assert await km.is_alive() assert await kc.is_alive() assert km.context.closed is False async def _env_test_body(self, kc): async def execute(cmd): request_id = kc.execute(cmd) while True: reply = await kc.get_shell_msg(TIMEOUT) if reply["parent_header"]["msg_id"] == request_id: break content = reply["content"] assert content["status"] == "ok" return content reply = await execute("env") assert reply is not None assert reply["user_expressions"]["env"] == "${TEST_VARS}:test_var_2" async def test_templated_kspec_env(self, install_kernel, jp_start_kernel): km, kc = await jp_start_kernel("signaltest") assert await km.is_alive() assert await kc.is_alive() assert km.context.closed is False await self._env_test_body(kc) def test_cleanup_context(self, km): assert km.context is not None km.cleanup_resources(restart=False) assert km.context.closed def test_no_cleanup_shared_context(self, jp_zmq_context): """kernel manager does not terminate shared context""" km = KernelManager(context=jp_zmq_context) assert km.context == jp_zmq_context assert km.context is not None km.cleanup_resources(restart=False) assert km.context.closed is False assert jp_zmq_context.closed is False def test_subclass_callables(self, km_subclass: SyncKMSubclass) -> None: km_subclass.reset_counts() km_subclass.start_kernel(stdout=PIPE, stderr=PIPE) assert km_subclass.call_count("start_kernel") == 1 assert km_subclass.call_count("_async_launch_kernel") == 1 is_alive = km_subclass.is_alive() assert is_alive km_subclass.reset_counts() km_subclass.restart_kernel(now=True) assert km_subclass.call_count("restart_kernel") == 1 assert km_subclass.call_count("_async_shutdown_kernel") == 1 assert km_subclass.call_count("_async_interrupt_kernel") == 1 assert km_subclass.call_count("_async_kill_kernel") == 1 assert km_subclass.call_count("_async_cleanup_resources") == 1 assert km_subclass.call_count("_async_launch_kernel") == 1 assert km_subclass.call_count("_async_signal_kernel") == 1 is_alive = km_subclass.is_alive() assert is_alive assert km_subclass.call_count("is_alive") >= 1 km_subclass.reset_counts() km_subclass.interrupt_kernel() assert km_subclass.call_count("_async_signal_kernel") == 1 assert isinstance(km_subclass, KernelManager) km_subclass.reset_counts() km_subclass.shutdown_kernel(now=False) assert km_subclass.call_count("shutdown_kernel") == 1 assert km_subclass.call_count("_async_interrupt_kernel") == 1 assert km_subclass.call_count("_async_cleanup_resources") == 1 assert km_subclass.call_count("_async_signal_kernel") == 1 assert km_subclass.call_count("_async_is_alive") >= 1 is_alive = km_subclass.is_alive() assert is_alive is False assert km_subclass.call_count("_async_is_alive") >= 1 assert km_subclass.context.closed class TestParallel: @pytest.mark.timeout(TIMEOUT) def test_start_sequence_kernels(self, config, install_kernel): """Ensure that a sequence of kernel startups doesn't break anything.""" self._run_signaltest_lifecycle(config) self._run_signaltest_lifecycle(config) self._run_signaltest_lifecycle(config) @pytest.mark.timeout(TIMEOUT + 10) def test_start_parallel_thread_kernels(self, config, install_kernel): if config.KernelManager.transport == "ipc": # FIXME pytest.skip("IPC transport is currently not working for this test!") self._run_signaltest_lifecycle(config) with concurrent.futures.ThreadPoolExecutor(max_workers=2) as thread_executor: future1 = thread_executor.submit(self._run_signaltest_lifecycle, config) future2 = thread_executor.submit(self._run_signaltest_lifecycle, config) future1.result() future2.result() @pytest.mark.timeout(TIMEOUT) @pytest.mark.skipif( (sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)), reason='"Bad file descriptor" error', ) def test_start_parallel_process_kernels(self, config, install_kernel): if config.KernelManager.transport == "ipc": # FIXME pytest.skip("IPC transport is currently not working for this test!") self._run_signaltest_lifecycle(config) with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_executor: future1 = thread_executor.submit(self._run_signaltest_lifecycle, config) with concurrent.futures.ProcessPoolExecutor(max_workers=1) as process_executor: future2 = process_executor.submit(self._run_signaltest_lifecycle, config) future2.result() future1.result() @pytest.mark.timeout(TIMEOUT) @pytest.mark.skipif( (sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)), reason='"Bad file descriptor" error', ) def test_start_sequence_process_kernels(self, config, install_kernel): if config.KernelManager.transport == "ipc": # FIXME pytest.skip("IPC transport is currently not working for this test!") self._run_signaltest_lifecycle(config) with concurrent.futures.ProcessPoolExecutor(max_workers=1) as pool_executor: future = pool_executor.submit(self._run_signaltest_lifecycle, config) future.result() def _prepare_kernel(self, km, startup_timeout=TIMEOUT, **kwargs): km.start_kernel(**kwargs) kc = km.client() kc.start_channels() try: kc.wait_for_ready(timeout=startup_timeout) except RuntimeError: kc.stop_channels() km.shutdown_kernel() raise return kc def _run_signaltest_lifecycle(self, config=None): km = KernelManager(config=config, kernel_name="signaltest") kc = self._prepare_kernel(km, stdout=PIPE, stderr=PIPE) def execute(cmd): request_id = kc.execute(cmd) while True: reply = kc.get_shell_msg(TIMEOUT) if reply["parent_header"]["msg_id"] == request_id: break content = reply["content"] assert content["status"] == "ok" return content execute("start") assert km.is_alive() execute("check") assert km.is_alive() km.restart_kernel(now=True) assert km.is_alive() execute("check") km.shutdown_kernel() assert km.context.closed kc.stop_channels() class TestAsyncKernelManager: async def test_lifecycle(self, async_km): await async_km.start_kernel(stdout=PIPE, stderr=PIPE) is_alive = await async_km.is_alive() assert is_alive is_ready = async_km.ready.done() assert is_ready await async_km.restart_kernel(now=True) is_alive = await async_km.is_alive() assert is_alive await async_km.interrupt_kernel() assert isinstance(async_km, AsyncKernelManager) await async_km.shutdown_kernel(now=True) is_alive = await async_km.is_alive() assert is_alive is False assert async_km.context.closed async def test_get_connect_info(self, async_km): cinfo = async_km.get_connection_info() keys = sorted(cinfo.keys()) expected = sorted( [ "ip", "transport", "hb_port", "shell_port", "stdin_port", "iopub_port", "control_port", "key", "signature_scheme", ] ) assert keys == expected @pytest.mark.timeout(10) @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals") async def test_signal_kernel_subprocesses(self, install_kernel, jp_start_kernel): km, kc = await jp_start_kernel("signaltest") async def execute(cmd): request_id = kc.execute(cmd) while True: reply = await kc.get_shell_msg(TIMEOUT) if reply["parent_header"]["msg_id"] == request_id: break content = reply["content"] assert content["status"] == "ok" return content # Ensure that shutdown_kernel and stop_channels are called at the end of the test. # Note: we cannot use addCleanup() for these since it doesn't prpperly handle # coroutines - which km.shutdown_kernel now is. N = 5 for i in range(N): await execute("start") await asyncio.sleep(1) # make sure subprocs stay up reply = await execute("check") assert reply["user_expressions"]["poll"] == [None] * N # start a job on the kernel to be interrupted request_id = kc.execute("sleep") await asyncio.sleep(1) # ensure sleep message has been handled before we interrupt await km.interrupt_kernel() while True: reply = await kc.get_shell_msg(TIMEOUT) if reply["parent_header"]["msg_id"] == request_id: break content = reply["content"] assert content["status"] == "ok" assert content["user_expressions"]["interrupted"] is True # wait up to 5s for subprocesses to handle signal for i in range(50): reply = await execute("check") if reply["user_expressions"]["poll"] != [-signal.SIGINT] * N: await asyncio.sleep(0.1) else: break # verify that subprocesses were interrupted assert reply["user_expressions"]["poll"] == [-signal.SIGINT] * N @pytest.mark.timeout(10) async def test_start_new_async_kernel(self, install_kernel, jp_start_kernel): km, kc = await jp_start_kernel("signaltest") is_alive = await km.is_alive() assert is_alive is_alive = await kc.is_alive() assert is_alive async def test_subclass_callables(self, async_km_subclass: AsyncKMSubclass) -> None: async_km_subclass.reset_counts() await async_km_subclass.start_kernel(stdout=PIPE, stderr=PIPE) assert async_km_subclass.call_count("start_kernel") == 1 assert async_km_subclass.call_count("_async_launch_kernel") == 1 is_alive = await async_km_subclass.is_alive() assert is_alive assert async_km_subclass.call_count("is_alive") >= 1 async_km_subclass.reset_counts() await async_km_subclass.restart_kernel(now=True) assert async_km_subclass.call_count("restart_kernel") == 1 assert async_km_subclass.call_count("_async_shutdown_kernel") == 1 assert async_km_subclass.call_count("_async_interrupt_kernel") == 1 assert async_km_subclass.call_count("_async_kill_kernel") == 1 assert async_km_subclass.call_count("_async_cleanup_resources") == 1 assert async_km_subclass.call_count("_async_launch_kernel") == 1 assert async_km_subclass.call_count("_async_signal_kernel") == 1 is_alive = await async_km_subclass.is_alive() assert is_alive assert async_km_subclass.call_count("is_alive") >= 1 async_km_subclass.reset_counts() await async_km_subclass.interrupt_kernel() assert async_km_subclass.call_count("interrupt_kernel") == 1 assert async_km_subclass.call_count("_async_signal_kernel") == 1 assert isinstance(async_km_subclass, AsyncKernelManager) async_km_subclass.reset_counts() await async_km_subclass.shutdown_kernel(now=False) assert async_km_subclass.call_count("shutdown_kernel") == 1 assert async_km_subclass.call_count("_async_interrupt_kernel") == 1 assert async_km_subclass.call_count("_async_cleanup_resources") == 1 assert async_km_subclass.call_count("_async_signal_kernel") == 1 assert async_km_subclass.call_count("_async_is_alive") >= 1 is_alive = await async_km_subclass.is_alive() assert is_alive is False assert async_km_subclass.call_count("_async_is_alive") >= 1 assert async_km_subclass.context.closed jupyter_client-8.6.2/tests/test_kernelspec.py000066400000000000000000000152301462351563100215040ustar00rootroot00000000000000"""Tests for the KernelSpecManager""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import copy import json import os import sys import tempfile import unittest from io import StringIO from logging import StreamHandler from os.path import join as pjoin from subprocess import PIPE, STDOUT, Popen from tempfile import TemporaryDirectory import pytest from jupyter_core import paths from jupyter_client import kernelspec from .utils import install_kernel, sample_kernel_json class KernelSpecTests(unittest.TestCase): def setUp(self): self.sample_kernel_dir = install_kernel( pjoin(paths.jupyter_data_dir(), "kernels"), name="sample" ) self.ksm = kernelspec.KernelSpecManager() td2 = TemporaryDirectory() self.addCleanup(td2.cleanup) self.installable_kernel = td2.name with open(pjoin(self.installable_kernel, "kernel.json"), "w") as f: json.dump(sample_kernel_json, f) def test_find_kernel_specs(self): kernels = self.ksm.find_kernel_specs() self.assertEqual(kernels["sample"], self.sample_kernel_dir) def test_allowed_kernel_names(self): ksm = kernelspec.KernelSpecManager() ksm.allowed_kernelspecs = {"foo"} kernels = ksm.find_kernel_specs() assert not len(kernels) def test_deprecated_whitelist(self): ksm = kernelspec.KernelSpecManager() ksm.whitelist = {"bar"} kernels = ksm.find_kernel_specs() assert not len(kernels) def test_get_kernel_spec(self): ks = self.ksm.get_kernel_spec("SAMPLE") # Case insensitive self.assertEqual(ks.resource_dir, self.sample_kernel_dir) self.assertEqual(ks.argv, sample_kernel_json["argv"]) self.assertEqual(ks.display_name, sample_kernel_json["display_name"]) self.assertEqual(ks.env, {}) self.assertEqual(ks.metadata, {}) def test_find_all_specs(self): kernels = self.ksm.get_all_specs() self.assertEqual(kernels["sample"]["resource_dir"], self.sample_kernel_dir) self.assertIsNotNone(kernels["sample"]["spec"]) def test_kernel_spec_priority(self): td = TemporaryDirectory() self.addCleanup(td.cleanup) sample_kernel = install_kernel(td.name, name="sample") self.ksm.kernel_dirs.append(td.name) kernels = self.ksm.find_kernel_specs() self.assertEqual(kernels["sample"], self.sample_kernel_dir) self.ksm.kernel_dirs.insert(0, td.name) kernels = self.ksm.find_kernel_specs() self.assertEqual(kernels["sample"], sample_kernel) def test_install_kernel_spec(self): self.ksm.install_kernel_spec(self.installable_kernel, kernel_name="tstinstalled", user=True) self.assertIn("tstinstalled", self.ksm.find_kernel_specs()) # install again works self.ksm.install_kernel_spec(self.installable_kernel, kernel_name="tstinstalled", user=True) def test_install_kernel_spec_prefix(self): td = TemporaryDirectory() self.addCleanup(td.cleanup) capture = StringIO() handler = StreamHandler(capture) self.ksm.log.addHandler(handler) self.ksm.install_kernel_spec( self.installable_kernel, kernel_name="tstinstalled", prefix=td.name ) captured = capture.getvalue() self.ksm.log.removeHandler(handler) self.assertIn("may not be found", captured) self.assertNotIn("tstinstalled", self.ksm.find_kernel_specs()) # add prefix to path, so we find the spec self.ksm.kernel_dirs.append(pjoin(td.name, "share", "jupyter", "kernels")) self.assertIn("tstinstalled", self.ksm.find_kernel_specs()) # Run it again, no warning this time because we've added it to the path capture = StringIO() handler = StreamHandler(capture) self.ksm.log.addHandler(handler) self.ksm.install_kernel_spec( self.installable_kernel, kernel_name="tstinstalled", prefix=td.name ) captured = capture.getvalue() self.ksm.log.removeHandler(handler) self.assertNotIn("may not be found", captured) @pytest.mark.skipif( not (os.name != "nt" and not os.access("/usr/local/share", os.W_OK)), reason="needs Unix system without root privileges", ) def test_cant_install_kernel_spec(self): with self.assertRaises(OSError): self.ksm.install_kernel_spec( self.installable_kernel, kernel_name="tstinstalled", user=False ) def test_remove_kernel_spec(self): path = self.ksm.remove_kernel_spec("sample") self.assertEqual(path, self.sample_kernel_dir) def test_remove_kernel_spec_app(self): p = Popen( [ sys.executable, "-m", "jupyter_client.kernelspecapp", "remove", "sample", "-f", ], stdout=PIPE, stderr=STDOUT, env=os.environ, ) out, _ = p.communicate() self.assertEqual(p.returncode, 0, out.decode("utf8", "replace")) def test_validate_kernel_name(self): for good in [ "julia-0.4", "ipython", "R", "python_3", "Haskell-1-2-3", ]: assert kernelspec._is_valid_kernel_name(good) for bad in [ "has space", "ünicode", "%percent", "question?", ]: assert not kernelspec._is_valid_kernel_name(bad) def test_subclass(self): """Test get_all_specs in subclasses that override find_kernel_specs""" ksm = self.ksm resource_dir = tempfile.gettempdir() native_name = kernelspec.NATIVE_KERNEL_NAME native_kernel = ksm.get_kernel_spec(native_name) class MyKSM(kernelspec.KernelSpecManager): def get_kernel_spec(self, name): spec = copy.copy(native_kernel) if name == "fake": spec.name = name spec.resource_dir = resource_dir elif name == native_name: pass else: raise KeyError(name) return spec def find_kernel_specs(self): return { "fake": resource_dir, native_name: native_kernel.resource_dir, } # ensure that get_all_specs doesn't raise if only # find_kernel_specs and get_kernel_spec are defined myksm = MyKSM() specs = myksm.get_all_specs() assert sorted(specs) == ["fake", native_name] jupyter_client-8.6.2/tests/test_kernelspecapp.py000066400000000000000000000025521462351563100222100ustar00rootroot00000000000000"""Tests for the kernelspecapp""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os import warnings from jupyter_client.kernelspecapp import ( InstallKernelSpec, KernelSpecApp, ListKernelSpecs, ListProvisioners, RemoveKernelSpec, ) def test_kernelspec_sub_apps(jp_kernel_dir): app = InstallKernelSpec() prefix = os.path.dirname(os.environ["JUPYTER_DATA_DIR"]) kernel_dir = os.path.join(prefix, "share/jupyter/kernels") app.kernel_spec_manager.kernel_dirs.append(kernel_dir) app.prefix = prefix app.initialize([str(jp_kernel_dir)]) with warnings.catch_warnings(): warnings.simplefilter("ignore") app.start() app1 = ListKernelSpecs() app1.kernel_spec_manager.kernel_dirs.append(kernel_dir) specs = app1.start() assert specs and "echo" in specs app2 = RemoveKernelSpec(spec_names=["echo"], force=True) app2.kernel_spec_manager.kernel_dirs.append(kernel_dir) app2.start() app3 = ListKernelSpecs() app3.kernel_spec_manager.kernel_dirs.append(kernel_dir) specs = app3.start() assert specs and "echo" not in specs def test_kernelspec_app(): app = KernelSpecApp() app.initialize(["list"]) app.start() def test_list_provisioners_app(): app = ListProvisioners() app.initialize([]) app.start() jupyter_client-8.6.2/tests/test_localinterfaces.py000066400000000000000000000017661462351563100225200ustar00rootroot00000000000000# ----------------------------------------------------------------------------- # Copyright (c) The Jupyter Development Team # # Distributed under the terms of the BSD License. The full license is in # the file LICENSE, distributed as part of this software. # ----------------------------------------------------------------------------- import sys from jupyter_client import localinterfaces def test_load_ips(): # Override the machinery that skips it if it was called before localinterfaces._load_ips.called = False # type:ignore[attr-defined] # Just check this doesn't error localinterfaces._load_ips(suppress_exceptions=False) localinterfaces.is_local_ip("8.8.8.8") localinterfaces.is_public_ip("127.0.0.1") ips = localinterfaces.local_ips() assert "127.0.0.1" in ips localinterfaces._load_ips_gethostbyname() localinterfaces._load_ips_dumb() if sys.platform == "linux": localinterfaces._load_ips_ip() localinterfaces._load_ips_ifconfig() jupyter_client-8.6.2/tests/test_manager.py000066400000000000000000000035511462351563100207660ustar00rootroot00000000000000"""Tests for KernelManager""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os import tempfile from unittest import mock from jupyter_client.kernelspec import KernelSpec from jupyter_client.manager import KernelManager def test_connection_file_real_path(): """Verify realpath is used when formatting connection file""" with mock.patch("os.path.realpath") as patched_realpath: patched_realpath.return_value = "foobar" km = KernelManager( connection_file=os.path.join(tempfile.gettempdir(), "kernel-test.json"), kernel_name="test_kernel", ) # KernelSpec and launch args have to be mocked as we don't have an actual kernel on disk km._kernel_spec = KernelSpec( resource_dir="test", **{ "argv": ["python.exe", "-m", "test_kernel", "-f", "{connection_file}"], "env": {}, "display_name": "test_kernel", "language": "python", "metadata": {}, }, ) km._launch_args = {} cmds = km.format_kernel_cmd() assert cmds[4] == "foobar" def test_env_update_launch_args_not_set(): km = KernelManager() km.update_env(env={"A": "A"}) def test_env_update_launch_args_not_dict(): km = KernelManager() km._launch_args = None km.update_env(env={"B": "B"}) def test_env_update_launch_args_no_env(): km = KernelManager() km._launch_args = {} km.update_env(env={"C": "C"}) def test_env_update_launch_args_env_not_dict(): km = KernelManager() km._launch_args = {"env": None} km.update_env(env={"D": "D"}) def test_env_update_launch_args_env_dic(): km = KernelManager() km._launch_args = {"env": {}} km.update_env(env={"E": "E"}) assert km._launch_args["env"]["E"] == "E" jupyter_client-8.6.2/tests/test_multikernelmanager.py000066400000000000000000000551261462351563100232470ustar00rootroot00000000000000"""Tests for the notebook kernel and session manager.""" import asyncio import concurrent.futures import os import sys import uuid from asyncio import ensure_future from subprocess import PIPE from unittest import TestCase import pytest from jupyter_core import paths from tornado.testing import AsyncTestCase, gen_test from traitlets.config.loader import Config from jupyter_client import AsyncKernelManager, KernelManager from jupyter_client.localinterfaces import localhost from jupyter_client.multikernelmanager import AsyncMultiKernelManager, MultiKernelManager from .utils import ( AsyncKMSubclass, AsyncMKMSubclass, SyncKMSubclass, SyncMKMSubclass, install_kernel, skip_win32, ) TIMEOUT = 30 async def now(awaitable): """Use this function ensure that this awaitable happens before other awaitables defined after it. """ (out,) = await asyncio.gather(awaitable) return out class TestKernelManager(TestCase): # static so picklable for multiprocessing on Windows @staticmethod def _get_tcp_km(): c = Config() km = MultiKernelManager(config=c) return km @staticmethod def _get_tcp_km_sub(): c = Config() km = SyncMKMSubclass(config=c) return km # static so picklable for multiprocessing on Windows @staticmethod def _get_ipc_km(): c = Config() c.KernelManager.transport = "ipc" c.KernelManager.ip = "test" km = MultiKernelManager(config=c) return km # static so picklable for multiprocessing on Windows @staticmethod def _run_lifecycle(km, test_kid=None): if test_kid: kid = km.start_kernel(stdout=PIPE, stderr=PIPE, kernel_id=test_kid) assert kid == test_kid else: kid = km.start_kernel(stdout=PIPE, stderr=PIPE) assert km.is_alive(kid) assert km.get_kernel(kid).ready.done() assert kid in km assert kid in km.list_kernel_ids() assert len(km) == 1, f"{len(km)} != {1}" km.restart_kernel(kid, now=True) assert km.is_alive(kid) assert kid in km.list_kernel_ids() km.interrupt_kernel(kid) k = km.get_kernel(kid) kc = k.client() assert isinstance(k, KernelManager) km.shutdown_kernel(kid, now=True) assert kid not in km, f"{kid} not in {km}" kc.stop_channels() def _run_cinfo(self, km, transport, ip): kid = km.start_kernel(stdout=PIPE, stderr=PIPE) km.get_kernel(kid) cinfo = km.get_connection_info(kid) self.assertEqual(transport, cinfo["transport"]) self.assertEqual(ip, cinfo["ip"]) self.assertTrue("stdin_port" in cinfo) self.assertTrue("iopub_port" in cinfo) stream = km.connect_iopub(kid) stream.close() self.assertTrue("shell_port" in cinfo) stream = km.connect_shell(kid) stream.close() self.assertTrue("hb_port" in cinfo) stream = km.connect_hb(kid) stream.close() km.shutdown_kernel(kid, now=True) # static so picklable for multiprocessing on Windows @classmethod def test_tcp_lifecycle(cls): km = cls._get_tcp_km() cls._run_lifecycle(km) def test_tcp_lifecycle_with_kernel_id(self): km = self._get_tcp_km() self._run_lifecycle(km, test_kid=str(uuid.uuid4())) def test_shutdown_all(self): km = self._get_tcp_km() kid = km.start_kernel(stdout=PIPE, stderr=PIPE) self.assertIn(kid, km) km.shutdown_all() self.assertNotIn(kid, km) # shutdown again is okay, because we have no kernels km.shutdown_all() def test_tcp_cinfo(self): km = self._get_tcp_km() self._run_cinfo(km, "tcp", localhost()) @skip_win32 def test_ipc_lifecycle(self): km = self._get_ipc_km() self._run_lifecycle(km) @skip_win32 def test_ipc_cinfo(self): km = self._get_ipc_km() self._run_cinfo(km, "ipc", "test") def test_start_sequence_tcp_kernels(self): """Ensure that a sequence of kernel startups doesn't break anything.""" self._run_lifecycle(self._get_tcp_km()) self._run_lifecycle(self._get_tcp_km()) self._run_lifecycle(self._get_tcp_km()) @skip_win32 def test_start_sequence_ipc_kernels(self): """Ensure that a sequence of kernel startups doesn't break anything.""" self._run_lifecycle(self._get_ipc_km()) self._run_lifecycle(self._get_ipc_km()) self._run_lifecycle(self._get_ipc_km()) def tcp_lifecycle_with_loop(self): # Ensure each thread has an event loop loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) self.test_tcp_lifecycle() loop.close() def test_start_parallel_thread_kernels(self): self.test_tcp_lifecycle() with concurrent.futures.ThreadPoolExecutor(max_workers=2) as thread_executor: future1 = thread_executor.submit(self.tcp_lifecycle_with_loop) future2 = thread_executor.submit(self.tcp_lifecycle_with_loop) future1.result() future2.result() @pytest.mark.skipif( (sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)), reason='"Bad file descriptor" error', ) @pytest.mark.skipif( sys.platform == "linux", reason="Kernel refuses to start in process pool", ) def test_start_parallel_process_kernels(self): self.test_tcp_lifecycle() with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_executor: future1 = thread_executor.submit(self.tcp_lifecycle_with_loop) with concurrent.futures.ProcessPoolExecutor(max_workers=1) as process_executor: # Windows tests needs this target to be picklable: future2 = process_executor.submit(self.test_tcp_lifecycle) future2.result() future1.result() def test_subclass_callables(self): km = self._get_tcp_km_sub() km.reset_counts() kid = km.start_kernel(stdout=PIPE, stderr=PIPE) assert km.call_count("start_kernel") == 1 assert isinstance(km.get_kernel(kid), SyncKMSubclass) assert km.get_kernel(kid).call_count("start_kernel") == 1 assert km.get_kernel(kid).call_count("_async_launch_kernel") == 1 assert km.is_alive(kid) assert kid in km assert kid in km.list_kernel_ids() assert len(km) == 1, f"{len(km)} != {1}" km.get_kernel(kid).reset_counts() km.reset_counts() km.restart_kernel(kid, now=True) assert km.call_count("restart_kernel") == 1 assert km.call_count("get_kernel") == 1 assert km.get_kernel(kid).call_count("restart_kernel") == 1 assert km.get_kernel(kid).call_count("_async_shutdown_kernel") == 1 assert km.get_kernel(kid).call_count("_async_interrupt_kernel") == 1 assert km.get_kernel(kid).call_count("_async_kill_kernel") == 1 assert km.get_kernel(kid).call_count("_async_cleanup_resources") == 1 assert km.get_kernel(kid).call_count("_async_launch_kernel") == 1 assert km.is_alive(kid) assert kid in km.list_kernel_ids() km.get_kernel(kid).reset_counts() km.reset_counts() km.interrupt_kernel(kid) assert km.call_count("interrupt_kernel") == 1 assert km.call_count("get_kernel") == 1 assert km.get_kernel(kid).call_count("interrupt_kernel") == 1 km.get_kernel(kid).reset_counts() km.reset_counts() k = km.get_kernel(kid) assert isinstance(k, SyncKMSubclass) assert km.call_count("get_kernel") == 1 km.get_kernel(kid).reset_counts() km.reset_counts() km.shutdown_all(now=True) assert km.call_count("remove_kernel") == 1 assert km.call_count("request_shutdown") == 0 assert km.call_count("finish_shutdown") == 0 assert km.call_count("cleanup_resources") == 0 assert kid not in km, f"{kid} not in {km}" def test_stream_on_recv(self): mkm = self._get_tcp_km() kid = mkm.start_kernel(stdout=PIPE, stderr=PIPE) stream = mkm.connect_iopub(kid) km = mkm.get_kernel(kid) client = km.client() session = km.session called = False def record_activity(msg_list): nonlocal called """Record an IOPub message arriving from a kernel""" idents, fed_msg_list = session.feed_identities(msg_list) msg = session.deserialize(fed_msg_list, content=False) msg_type = msg["header"]["msg_type"] stream.send(msg) called = True stream.on_recv(record_activity) while True: client.kernel_info() import time time.sleep(0.1) if called: break client.stop_channels() km.shutdown_kernel(now=True) class TestAsyncKernelManager(AsyncTestCase): # static so picklable for multiprocessing on Windows @staticmethod def _get_tcp_km(): c = Config() km = AsyncMultiKernelManager(config=c) return km @staticmethod def _get_tcp_km_sub(): c = Config() km = AsyncMKMSubclass(config=c) return km # static so picklable for multiprocessing on Windows @staticmethod def _get_ipc_km(): c = Config() c.KernelManager.transport = "ipc" c.KernelManager.ip = "test" km = AsyncMultiKernelManager(config=c) return km @staticmethod def _get_pending_kernels_km(): c = Config() c.AsyncMultiKernelManager.use_pending_kernels = True km = AsyncMultiKernelManager(config=c) return km # static so picklable for multiprocessing on Windows @staticmethod async def _run_lifecycle(km, test_kid=None): if test_kid: kid = await km.start_kernel(stdout=PIPE, stderr=PIPE, kernel_id=test_kid) assert kid == test_kid else: kid = await km.start_kernel(stdout=PIPE, stderr=PIPE) assert await km.is_alive(kid) assert kid in km assert kid in km.list_kernel_ids() assert len(km) == 1, f"{len(km)} != {1}" # Ensure we can interrupt during a restart. fut = km.restart_kernel(kid, now=True) await km.interrupt_kernel(kid) assert await km.is_alive(kid) await fut assert kid in km.list_kernel_ids() k = km.get_kernel(kid) assert isinstance(k, AsyncKernelManager) await km.shutdown_kernel(kid, now=True) assert kid not in km, f"{kid} not in {km}" async def _run_cinfo(self, km, transport, ip): kid = await km.start_kernel(stdout=PIPE, stderr=PIPE) km.get_kernel(kid) cinfo = km.get_connection_info(kid) self.assertEqual(transport, cinfo["transport"]) self.assertEqual(ip, cinfo["ip"]) self.assertTrue("stdin_port" in cinfo) self.assertTrue("iopub_port" in cinfo) stream = km.connect_iopub(kid) stream.close() self.assertTrue("shell_port" in cinfo) stream = km.connect_shell(kid) stream.close() self.assertTrue("hb_port" in cinfo) stream = km.connect_hb(kid) stream.close() await km.shutdown_kernel(kid, now=True) self.assertNotIn(kid, km) @gen_test async def test_tcp_lifecycle(self): await self.raw_tcp_lifecycle() @gen_test async def test_tcp_lifecycle_with_kernel_id(self): await self.raw_tcp_lifecycle(test_kid=str(uuid.uuid4())) @gen_test async def test_shutdown_all(self): km = self._get_tcp_km() kid = await km.start_kernel(stdout=PIPE, stderr=PIPE) self.assertIn(kid, km) await km.shutdown_all() self.assertNotIn(kid, km) # shutdown again is okay, because we have no kernels await km.shutdown_all() @gen_test(timeout=20) async def test_use_after_shutdown_all(self): km = self._get_tcp_km() kid = await km.start_kernel(stdout=PIPE, stderr=PIPE) self.assertIn(kid, km) await km.shutdown_all() self.assertNotIn(kid, km) # Start another kernel kid = await km.start_kernel(stdout=PIPE, stderr=PIPE) self.assertIn(kid, km) await km.shutdown_all() self.assertNotIn(kid, km) # shutdown again is okay, because we have no kernels await km.shutdown_all() @gen_test(timeout=20) async def test_shutdown_all_while_starting(self): km = self._get_tcp_km() kid_future = asyncio.ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE)) # This is relying on the ordering of the asyncio queue, not sure if guaranteed or not: kid, _ = await asyncio.gather(kid_future, km.shutdown_all()) self.assertNotIn(kid, km) # Start another kernel kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE)) self.assertIn(kid, km) self.assertEqual(len(km), 1) await km.shutdown_all() self.assertNotIn(kid, km) # shutdown again is okay, because we have no kernels await km.shutdown_all() @gen_test async def test_use_pending_kernels(self): km = self._get_pending_kernels_km() kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE)) kernel = km.get_kernel(kid) assert not kernel.ready.done() assert kid in km assert kid in km.list_kernel_ids() assert len(km) == 1, f"{len(km)} != {1}" # Wait for the kernel to start. await kernel.ready await km.restart_kernel(kid, now=True) out = await km.is_alive(kid) assert out assert kid in km.list_kernel_ids() await km.interrupt_kernel(kid) k = km.get_kernel(kid) assert isinstance(k, AsyncKernelManager) await ensure_future(km.shutdown_kernel(kid, now=True)) # Wait for the kernel to shutdown await kernel.ready assert kid not in km, f"{kid} not in {km}" @gen_test async def test_use_pending_kernels_early_restart(self): km = self._get_pending_kernels_km() kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE)) kernel = km.get_kernel(kid) assert not kernel.ready.done() with pytest.raises(RuntimeError): await km.restart_kernel(kid, now=True) await kernel.ready await ensure_future(km.shutdown_kernel(kid, now=True)) # Wait for the kernel to shutdown await kernel.ready assert kid not in km, f"{kid} not in {km}" @gen_test async def test_use_pending_kernels_early_shutdown(self): km = self._get_pending_kernels_km() kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE)) kernel = km.get_kernel(kid) assert not kernel.ready.done() # Try shutting down while the kernel is pending await ensure_future(km.shutdown_kernel(kid, now=True)) # Wait for the kernel to shutdown await kernel.ready assert kid not in km, f"{kid} not in {km}" @gen_test async def test_use_pending_kernels_early_interrupt(self): km = self._get_pending_kernels_km() kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE)) kernel = km.get_kernel(kid) assert not kernel.ready.done() with pytest.raises(RuntimeError): await km.interrupt_kernel(kid) # Now wait for the kernel to be ready. await kernel.ready await ensure_future(km.shutdown_kernel(kid, now=True)) # Wait for the kernel to shutdown await kernel.ready assert kid not in km, f"{kid} not in {km}" @gen_test async def test_tcp_cinfo(self): km = self._get_tcp_km() await self._run_cinfo(km, "tcp", localhost()) @skip_win32 @gen_test async def test_ipc_lifecycle(self): km = self._get_ipc_km() await self._run_lifecycle(km) @skip_win32 @gen_test async def test_ipc_cinfo(self): km = self._get_ipc_km() await self._run_cinfo(km, "ipc", "test") @gen_test async def test_start_sequence_tcp_kernels(self): """Ensure that a sequence of kernel startups doesn't break anything.""" await self._run_lifecycle(self._get_tcp_km()) await self._run_lifecycle(self._get_tcp_km()) await self._run_lifecycle(self._get_tcp_km()) @skip_win32 @gen_test async def test_start_sequence_ipc_kernels(self): """Ensure that a sequence of kernel startups doesn't break anything.""" await self._run_lifecycle(self._get_ipc_km()) await self._run_lifecycle(self._get_ipc_km()) await self._run_lifecycle(self._get_ipc_km()) def tcp_lifecycle_with_loop(self): # Ensure each thread has an event loop loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop.run_until_complete(self.raw_tcp_lifecycle()) loop.close() # static so picklable for multiprocessing on Windows @classmethod async def raw_tcp_lifecycle(cls, test_kid=None): # Since @gen_test creates an event loop, we need a raw form of # test_tcp_lifecycle that assumes the loop already exists. km = cls._get_tcp_km() await cls._run_lifecycle(km, test_kid=test_kid) # static so picklable for multiprocessing on Windows @classmethod def raw_tcp_lifecycle_sync(cls, test_kid=None): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop.run_until_complete(cls.raw_tcp_lifecycle(test_kid=test_kid)) loop.close() @gen_test async def test_start_parallel_thread_kernels(self): await self.raw_tcp_lifecycle() with concurrent.futures.ThreadPoolExecutor(max_workers=2) as thread_executor: future1 = thread_executor.submit(self.tcp_lifecycle_with_loop) future2 = thread_executor.submit(self.tcp_lifecycle_with_loop) future1.result() future2.result() @gen_test async def test_start_parallel_process_kernels(self): await self.raw_tcp_lifecycle() with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_executor: future1 = thread_executor.submit(self.tcp_lifecycle_with_loop) with concurrent.futures.ProcessPoolExecutor(max_workers=1) as process_executor: # Windows tests needs this target to be picklable: future2 = process_executor.submit(self.raw_tcp_lifecycle_sync) future2.result() future1.result() @gen_test async def test_subclass_callables(self): mkm = self._get_tcp_km_sub() mkm.reset_counts() kid = await mkm.start_kernel(stdout=PIPE, stderr=PIPE) assert mkm.call_count("start_kernel") == 1 assert isinstance(mkm.get_kernel(kid), AsyncKMSubclass) assert mkm.get_kernel(kid).call_count("start_kernel") == 1 assert mkm.get_kernel(kid).call_count("_async_launch_kernel") == 1 assert await mkm.is_alive(kid) assert kid in mkm assert kid in mkm.list_kernel_ids() assert len(mkm) == 1, f"{len(mkm)} != {1}" mkm.get_kernel(kid).reset_counts() mkm.reset_counts() await mkm.restart_kernel(kid, now=True) assert mkm.call_count("restart_kernel") == 1 assert mkm.call_count("get_kernel") == 1 assert mkm.get_kernel(kid).call_count("restart_kernel") == 1 assert mkm.get_kernel(kid).call_count("_async_interrupt_kernel") == 1 assert mkm.get_kernel(kid).call_count("_async_kill_kernel") == 1 assert mkm.get_kernel(kid).call_count("_async_cleanup_resources") == 1 assert mkm.get_kernel(kid).call_count("_async_launch_kernel") == 1 assert await mkm.is_alive(kid) assert kid in mkm.list_kernel_ids() mkm.get_kernel(kid).reset_counts() mkm.reset_counts() await mkm.interrupt_kernel(kid) assert mkm.call_count("interrupt_kernel") == 1 assert mkm.call_count("get_kernel") == 1 assert mkm.get_kernel(kid).call_count("interrupt_kernel") == 1 mkm.get_kernel(kid).reset_counts() mkm.reset_counts() k = mkm.get_kernel(kid) assert isinstance(k, AsyncKMSubclass) assert mkm.call_count("get_kernel") == 1 mkm.get_kernel(kid).reset_counts() mkm.reset_counts() await mkm.shutdown_all(now=True) assert mkm.call_count("remove_kernel") == 1 assert mkm.call_count("_async_request_shutdown") == 0 assert mkm.call_count("_async_finish_shutdown") == 0 assert mkm.call_count("_async_cleanup_resources") == 0 assert kid not in mkm, f"{kid} not in {mkm}" @gen_test async def test_bad_kernelspec(self): km = self._get_tcp_km() install_kernel( os.path.join(paths.jupyter_data_dir(), "kernels"), argv=["non_existent_executable"], name="bad", ) with pytest.raises(FileNotFoundError): await ensure_future(km.start_kernel(kernel_name="bad", stdout=PIPE, stderr=PIPE)) @gen_test async def test_bad_kernelspec_pending(self): km = self._get_pending_kernels_km() install_kernel( os.path.join(paths.jupyter_data_dir(), "kernels"), argv=["non_existent_executable"], name="bad", ) kernel_id = await ensure_future( km.start_kernel(kernel_name="bad", stdout=PIPE, stderr=PIPE) ) with pytest.raises(FileNotFoundError): await km.get_kernel(kernel_id).ready assert kernel_id in km.list_kernel_ids() await ensure_future(km.shutdown_kernel(kernel_id)) assert kernel_id not in km.list_kernel_ids() @gen_test async def test_stream_on_recv(self): mkm = self._get_tcp_km() kid = await mkm.start_kernel(stdout=PIPE, stderr=PIPE) stream = mkm.connect_iopub(kid) km = mkm.get_kernel(kid) client = km.client() session = km.session called = False def record_activity(msg_list): nonlocal called """Record an IOPub message arriving from a kernel""" idents, fed_msg_list = session.feed_identities(msg_list) msg = session.deserialize(fed_msg_list, content=False) msg_type = msg["header"]["msg_type"] stream.send(msg) called = True stream.on_recv(record_activity) while True: await client.kernel_info(reply=True) if called: break await asyncio.sleep(0.1) client.stop_channels() await km.shutdown_kernel(now=True) jupyter_client-8.6.2/tests/test_provisioning.py000066400000000000000000000271641462351563100221100ustar00rootroot00000000000000"""Test Provisioning""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import asyncio import json import os import signal import sys from subprocess import PIPE from typing import Any, Dict, List, Optional import pytest from jupyter_core import paths from traitlets import Int, Unicode from jupyter_client.connect import KernelConnectionInfo from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel from jupyter_client.launcher import launch_kernel from jupyter_client.manager import AsyncKernelManager from jupyter_client.provisioning import ( KernelProvisionerBase, KernelProvisionerFactory, LocalProvisioner, ) from jupyter_client.provisioning.factory import EntryPoint pjoin = os.path.join class SubclassedTestProvisioner(LocalProvisioner): # type:ignore config_var_1: int = Int(config=True) # type:ignore config_var_2: str = Unicode(config=True) # type:ignore pass class CustomTestProvisioner(KernelProvisionerBase): # type:ignore process = None pid = None pgid = None config_var_1: int = Int(config=True) # type:ignore config_var_2: str = Unicode(config=True) # type:ignore @property def has_process(self) -> bool: return self.process is not None async def poll(self) -> Optional[int]: ret = 0 if self.process: ret = self.process.poll() return ret async def wait(self) -> Optional[int]: ret = 0 if self.process: while await self.poll() is None: await asyncio.sleep(0.1) # Process is no longer alive, wait and clear ret = self.process.wait() # Make sure all the fds get closed. for attr in ["stdout", "stderr", "stdin"]: fid = getattr(self.process, attr) if fid: fid.close() self.process = None return ret async def send_signal(self, signum: int) -> None: if self.process: if signum == signal.SIGINT and sys.platform == "win32": from jupyter_client.win_interrupt import send_interrupt send_interrupt(self.process.win32_interrupt_event) return # Prefer process-group over process if self.pgid and hasattr(os, "killpg"): try: os.killpg(self.pgid, signum) return except OSError: pass return self.process.send_signal(signum) async def kill(self, restart: bool = False) -> None: if self.process: self.process.kill() async def terminate(self, restart: bool = False) -> None: if self.process: self.process.terminate() async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: km = self.parent if km: # save kwargs for use in restart km._launch_args = kwargs.copy() # build the Popen cmd extra_arguments = kwargs.pop("extra_arguments", []) # write connection file / get default ports km.write_connection_file() self.connection_info = km.get_connection_info() kernel_cmd = km.format_kernel_cmd( extra_arguments=extra_arguments ) # This needs to remain here for b/c return await super().pre_launch(cmd=kernel_cmd, **kwargs) return {} async def launch_kernel(self, cmd: List[str], **kwargs: Any) -> KernelConnectionInfo: scrubbed_kwargs = kwargs self.process = launch_kernel(cmd, **scrubbed_kwargs) pgid = None if hasattr(os, "getpgid"): try: pgid = os.getpgid(self.process.pid) except OSError: pass self.pid = self.process.pid self.pgid = pgid return self.connection_info async def cleanup(self, restart: bool = False) -> None: pass class NewTestProvisioner(CustomTestProvisioner): # type:ignore pass def build_kernelspec(name: str, provisioner: Optional[str] = None) -> None: spec: dict = { "argv": [ sys.executable, "-m", "tests.signalkernel", "-f", "{connection_file}", ], "display_name": f"Signal Test Kernel w {provisioner}", "env": {"TEST_VARS": "${TEST_VARS}:test_var_2"}, "metadata": {}, } if provisioner: kernel_provisioner = {"kernel_provisioner": {"provisioner_name": provisioner}} spec["metadata"].update(kernel_provisioner) if provisioner != "local-provisioner": spec["metadata"]["kernel_provisioner"]["config"] = { "config_var_1": 42, "config_var_2": name, } kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", name) os.makedirs(kernel_dir) with open(pjoin(kernel_dir, "kernel.json"), "w") as f: f.write(json.dumps(spec)) def new_provisioner(): build_kernelspec("new_provisioner", "new-test-provisioner") def custom_provisioner(): build_kernelspec("custom_provisioner", "custom-test-provisioner") @pytest.fixture def all_provisioners(): build_kernelspec("no_provisioner") build_kernelspec("missing_provisioner", "missing-provisioner") build_kernelspec("default_provisioner", "local-provisioner") build_kernelspec("subclassed_provisioner", "subclassed-test-provisioner") custom_provisioner() @pytest.fixture( params=[ "no_provisioner", "default_provisioner", "missing_provisioner", "custom_provisioner", "subclassed_provisioner", ] ) def akm(request, all_provisioners): return AsyncKernelManager(kernel_name=request.param) initial_provisioner_map = { "local-provisioner": "jupyter_client.provisioning:LocalProvisioner", "subclassed-test-provisioner": "tests.test_provisioning:SubclassedTestProvisioner", "custom-test-provisioner": "tests.test_provisioning:CustomTestProvisioner", } def mock_get_all_provisioners() -> List[EntryPoint]: result = [] for name, epstr in initial_provisioner_map.items(): result.append(EntryPoint(name, epstr, KernelProvisionerFactory.GROUP_NAME)) return result def mock_get_provisioner(_: str, name: str) -> EntryPoint: if name == "new-test-provisioner": return EntryPoint( "new-test-provisioner", "tests.test_provisioning:NewTestProvisioner", KernelProvisionerFactory.GROUP_NAME, ) if name in initial_provisioner_map: return EntryPoint(name, initial_provisioner_map[name], KernelProvisionerFactory.GROUP_NAME) raise ValueError("No such entry point") @pytest.fixture def kpf(monkeypatch): """Setup the Kernel Provisioner Factory, mocking the entrypoint fetch calls.""" monkeypatch.setattr( KernelProvisionerFactory, "_get_all_provisioners", mock_get_all_provisioners ) monkeypatch.setattr(KernelProvisionerFactory, "_get_provisioner", mock_get_provisioner) factory = KernelProvisionerFactory.instance() return factory class TestDiscovery: def test_find_all_specs(self, kpf, all_provisioners): ksm = KernelSpecManager() kernels = ksm.get_all_specs() # Ensure specs for initial provisioners exist, # and missing_provisioner & new_provisioner don't assert "no_provisioner" in kernels assert "default_provisioner" in kernels assert "subclassed_provisioner" in kernels assert "custom_provisioner" in kernels assert "missing_provisioner" not in kernels assert "new_provisioner" not in kernels def test_get_missing(self, all_provisioners): ksm = KernelSpecManager() with pytest.raises(NoSuchKernel): ksm.get_kernel_spec("missing_provisioner") def test_get_new(self, kpf): new_provisioner() # Introduce provisioner after initialization of KPF ksm = KernelSpecManager() kernel = ksm.get_kernel_spec("new_provisioner") assert kernel.metadata["kernel_provisioner"]["provisioner_name"] == "new-test-provisioner" class TestRuntime: async def akm_test(self, kernel_mgr): """Starts a kernel, validates the associated provisioner's config, shuts down kernel""" assert kernel_mgr.provisioner is None if kernel_mgr.kernel_name == "missing_provisioner": with pytest.raises(NoSuchKernel): await kernel_mgr.start_kernel() else: await kernel_mgr.start_kernel() TestRuntime.validate_provisioner(kernel_mgr) await kernel_mgr.shutdown_kernel() assert kernel_mgr.provisioner is not None assert kernel_mgr.provisioner.has_process is False async def test_existing(self, kpf, akm): await self.akm_test(akm) async def test_new(self, kpf): new_provisioner() # Introduce provisioner after initialization of KPF new_km = AsyncKernelManager(kernel_name="new_provisioner") await self.akm_test(new_km) async def test_custom_lifecycle(self, kpf): custom_provisioner() async_km = AsyncKernelManager(kernel_name="custom_provisioner") await async_km.start_kernel(stdout=PIPE, stderr=PIPE) is_alive = await async_km.is_alive() assert is_alive await async_km.restart_kernel(now=True) is_alive = await async_km.is_alive() assert is_alive await async_km.interrupt_kernel() assert isinstance(async_km, AsyncKernelManager) await async_km.shutdown_kernel(now=True) is_alive = await async_km.is_alive() assert is_alive is False assert async_km.context.closed async def test_default_provisioner_config(self, kpf, all_provisioners): kpf.default_provisioner_name = "custom-test-provisioner" async_km = AsyncKernelManager(kernel_name="no_provisioner") await async_km.start_kernel(stdout=PIPE, stderr=PIPE) is_alive = await async_km.is_alive() assert is_alive assert isinstance(async_km.provisioner, CustomTestProvisioner) assert async_km.provisioner.config_var_1 == 0 # Not in kernelspec, so default of 0 exists await async_km.shutdown_kernel(now=True) is_alive = await async_km.is_alive() assert is_alive is False assert async_km.context.closed @staticmethod def validate_provisioner(akm: AsyncKernelManager) -> None: # Ensure the provisioner is managing a process at this point assert akm.provisioner is not None and akm.provisioner.has_process # Validate provisioner config if akm.kernel_name in ["no_provisioner", "default_provisioner"]: assert not hasattr(akm.provisioner, "config_var_1") assert not hasattr(akm.provisioner, "config_var_2") else: assert akm.provisioner.config_var_1 == 42 # type:ignore assert akm.provisioner.config_var_2 == akm.kernel_name # type:ignore # Validate provisioner class if akm.kernel_name in ["no_provisioner", "default_provisioner", "subclassed_provisioner"]: assert isinstance(akm.provisioner, LocalProvisioner) if akm.kernel_name == "subclassed_provisioner": assert isinstance(akm.provisioner, SubclassedTestProvisioner) else: assert not isinstance(akm.provisioner, SubclassedTestProvisioner) else: assert isinstance(akm.provisioner, CustomTestProvisioner) assert not isinstance(akm.provisioner, LocalProvisioner) if akm.kernel_name == "new_provisioner": assert isinstance(akm.provisioner, NewTestProvisioner) jupyter_client-8.6.2/tests/test_public_api.py000066400000000000000000000012501462351563100214550ustar00rootroot00000000000000"""Test the jupyter_client public API """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import jupyter_client from jupyter_client import connect, launcher def test_kms(): for base in ("", "Async", "Multi"): KM = base + "KernelManager" assert KM in dir(jupyter_client) def test_kcs(): for base in ("", "Blocking", "Async"): KM = base + "KernelClient" assert KM in dir(jupyter_client) def test_launcher(): for name in launcher.__all__: assert name in dir(jupyter_client) def test_connect(): for name in connect.__all__: assert name in dir(jupyter_client) jupyter_client-8.6.2/tests/test_restarter.py000066400000000000000000000203241462351563100213640ustar00rootroot00000000000000"""Tests for the KernelManager""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import asyncio import json import os import sys from concurrent.futures import Future import pytest from jupyter_core import paths from traitlets.config.loader import Config from traitlets.log import get_logger from jupyter_client.ioloop import AsyncIOLoopKernelManager, IOLoopKernelManager pjoin = os.path.join def _install_kernel(name="problemtest", extra_env=None): if extra_env is None: extra_env = {} kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", name) os.makedirs(kernel_dir) with open(pjoin(kernel_dir, "kernel.json"), "w") as f: f.write( json.dumps( { "argv": [ sys.executable, "-m", "tests.problemkernel", "-f", "{connection_file}", ], "display_name": "Problematic Test Kernel", "env": {"TEST_VARS": "${TEST_VARS}:test_var_2", **extra_env}, } ) ) return name @pytest.fixture def install_kernel(): return _install_kernel("problemtest") @pytest.fixture def install_fail_kernel(): return _install_kernel("problemtest-fail", extra_env={"FAIL_ON_START": "1"}) @pytest.fixture def install_slow_fail_kernel(): return _install_kernel( "problemtest-slow", extra_env={"STARTUP_DELAY": "5", "FAIL_ON_START": "1"} ) @pytest.fixture(params=["tcp", "ipc"]) def transport(request): if sys.platform == "win32" and request.param == "ipc": # pytest.skip("Transport 'ipc' not supported on Windows.") return request.param @pytest.fixture def config(transport): c = Config() c.KernelManager.transport = transport if transport == "ipc": c.KernelManager.ip = "test" return c @pytest.fixture def debug_logging(): get_logger().setLevel("DEBUG") win_skip = pytest.mark.skipif( os.name == "nt", reason='"RuntimeError: Cannot run the event loop while another loop is running" error on Windows', ) @win_skip async def test_restart_check(config, install_kernel, debug_logging): """Test that the kernel is restarted and recovers""" # If this test fails, run it with --log-cli-level=DEBUG to inspect N_restarts = 1 config.KernelRestarter.restart_limit = N_restarts config.KernelRestarter.debug = True km = IOLoopKernelManager(kernel_name=install_kernel, config=config) cbs = 0 restarts: list = [Future() for i in range(N_restarts)] def cb(): nonlocal cbs if cbs >= N_restarts: raise RuntimeError("Kernel restarted more than %d times!" % N_restarts) restarts[cbs].set_result(True) cbs += 1 try: km.start_kernel() km.add_restart_callback(cb, "restart") except BaseException: if km.has_kernel: km.shutdown_kernel() raise try: for i in range(N_restarts + 1): kc = km.client() kc.start_channels() kc.wait_for_ready(timeout=60) kc.stop_channels() if i < N_restarts: # Kill without cleanup to simulate crash: assert km.provisioner is not None await km.provisioner.kill() restarts[i].result() # Wait for kill + restart max_wait = 10.0 waited = 0.0 while waited < max_wait and km.is_alive(): await asyncio.sleep(0.1) waited += 0.1 while waited < max_wait and not km.is_alive(): await asyncio.sleep(0.1) waited += 0.1 assert cbs == N_restarts assert km.is_alive() finally: km.shutdown_kernel(now=True) assert km.context.closed @win_skip async def test_restarter_gives_up(config, install_fail_kernel, debug_logging): """Test that the restarter gives up after reaching the restart limit""" # If this test fails, run it with --log-cli-level=DEBUG to inspect N_restarts = 1 config.KernelRestarter.restart_limit = N_restarts config.KernelRestarter.debug = True km = IOLoopKernelManager(kernel_name=install_fail_kernel, config=config) cbs = 0 restarts: list = [Future() for i in range(N_restarts)] def cb(): nonlocal cbs if cbs >= N_restarts: raise RuntimeError("Kernel restarted more than %d times!" % N_restarts) restarts[cbs].set_result(True) cbs += 1 died: Future = Future() def on_death(): died.set_result(True) try: km.start_kernel() km.add_restart_callback(cb, "restart") km.add_restart_callback(on_death, "dead") except BaseException: if km.has_kernel: km.shutdown_kernel() raise try: for i in range(N_restarts): restarts[i].result() assert died.result() assert cbs == N_restarts finally: km.shutdown_kernel(now=True) assert km.context.closed async def test_async_restart_check(config, install_kernel, debug_logging): """Test that the kernel is restarted and recovers""" # If this test fails, run it with --log-cli-level=DEBUG to inspect N_restarts = 1 config.KernelRestarter.restart_limit = N_restarts config.KernelRestarter.debug = True km = AsyncIOLoopKernelManager(kernel_name=install_kernel, config=config) cbs = 0 restarts: list = [asyncio.Future() for i in range(N_restarts)] def cb(): nonlocal cbs if cbs >= N_restarts: raise RuntimeError("Kernel restarted more than %d times!" % N_restarts) restarts[cbs].set_result(True) cbs += 1 try: await km.start_kernel() km.add_restart_callback(cb, "restart") except BaseException: if km.has_kernel: await km.shutdown_kernel() raise try: for i in range(N_restarts + 1): kc = km.client() kc.start_channels() await kc.wait_for_ready(timeout=60) kc.stop_channels() if i < N_restarts: # Kill without cleanup to simulate crash: assert km.provisioner is not None await km.provisioner.kill() await restarts[i] # Wait for kill + restart max_wait = 10.0 waited = 0.0 while waited < max_wait and await km.is_alive(): await asyncio.sleep(0.1) waited += 0.1 while waited < max_wait and not await km.is_alive(): await asyncio.sleep(0.1) waited += 0.1 assert cbs == N_restarts assert await km.is_alive() finally: await km.shutdown_kernel(now=True) assert km.context.closed async def test_async_restarter_gives_up(config, install_slow_fail_kernel, debug_logging): """Test that the restarter gives up after reaching the restart limit""" # If this test fails, run it with --log-cli-level=DEBUG to inspect N_restarts = 2 config.KernelRestarter.restart_limit = N_restarts config.KernelRestarter.debug = True config.KernelRestarter.stable_start_time = 30.0 km = AsyncIOLoopKernelManager(kernel_name=install_slow_fail_kernel, config=config) cbs = 0 restarts: list = [asyncio.Future() for i in range(N_restarts)] def cb(): nonlocal cbs if cbs >= N_restarts: raise RuntimeError("Kernel restarted more than %d times!" % N_restarts) restarts[cbs].set_result(True) cbs += 1 died: asyncio.Future = asyncio.Future() def on_death(): died.set_result(True) try: await km.start_kernel() km.add_restart_callback(cb, "restart") km.add_restart_callback(on_death, "dead") except BaseException: if km.has_kernel: await km.shutdown_kernel() raise try: await asyncio.gather(*restarts) assert await died assert cbs == N_restarts finally: await km.shutdown_kernel(now=True) assert km.context.closed jupyter_client-8.6.2/tests/test_session.py000066400000000000000000000466051462351563100210460ustar00rootroot00000000000000"""test building messages with Session""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import hmac import math import os import platform import uuid import warnings from datetime import datetime from unittest import mock import pytest import zmq from dateutil.tz import tzlocal from tornado import ioloop from zmq.eventloop.zmqstream import ZMQStream from jupyter_client import jsonutil from jupyter_client import session as ss def _bad_packer(obj): raise TypeError("I don't work") def _bad_unpacker(bytes): raise TypeError("I don't work either") @pytest.fixture def no_copy_threshold(): """Disable zero-copy optimizations in pyzmq >= 17""" with mock.patch.object(zmq, "COPY_THRESHOLD", 1, create=True): yield @pytest.fixture() def session(): return ss.Session() @pytest.mark.usefixtures("no_copy_threshold") class TestSession: def assertEqual(self, a, b): assert a == b, (a, b) def assertTrue(self, a): assert a, a def test_msg(self, session): """message format""" msg = session.msg("execute") thekeys = set("header parent_header metadata content msg_type msg_id".split()) s = set(msg.keys()) self.assertEqual(s, thekeys) self.assertTrue(isinstance(msg["content"], dict)) self.assertTrue(isinstance(msg["metadata"], dict)) self.assertTrue(isinstance(msg["header"], dict)) self.assertTrue(isinstance(msg["parent_header"], dict)) self.assertTrue(isinstance(msg["msg_id"], str)) self.assertTrue(isinstance(msg["msg_type"], str)) self.assertEqual(msg["header"]["msg_type"], "execute") self.assertEqual(msg["msg_type"], "execute") def test_serialize(self, session): msg = session.msg("execute", content=dict(a=10, b=1.1)) msg_list = session.serialize(msg, ident=b"foo") ident, msg_list = session.feed_identities(msg_list) new_msg = session.deserialize(msg_list) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_id"], msg["msg_id"]) self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) # ensure floats don't come out as Decimal: self.assertEqual(type(new_msg["content"]["b"]), type(msg["content"]["b"])) def test_default_secure(self, session): assert isinstance(session.key, bytes) assert isinstance(session.auth, hmac.HMAC) def test_send_sync(self, session): ctx = zmq.Context() A = ctx.socket(zmq.PAIR) B = ctx.socket(zmq.PAIR) A.bind("inproc://test") B.connect("inproc://test") msg = session.msg("execute", content=dict(a=10)) session.send(A, msg, ident=b"foo", buffers=[b"bar"]) ident, msg_list = session.feed_identities(B.recv_multipart()) new_msg = session.deserialize(msg_list) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_id"], msg["msg_id"]) self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) self.assertEqual(new_msg["buffers"], [b"bar"]) content = msg["content"] header = msg["header"] header["msg_id"] = session.msg_id parent = msg["parent_header"] metadata = msg["metadata"] header["msg_type"] session.send( A, None, content=content, parent=parent, header=header, metadata=metadata, ident=b"foo", buffers=[b"bar"], ) ident, msg_list = session.feed_identities(B.recv_multipart()) new_msg = session.deserialize(msg_list) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_id"], header["msg_id"]) self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["buffers"], [b"bar"]) header["msg_id"] = session.msg_id session.send(A, msg, ident=b"foo", buffers=[b"bar"]) ident, new_msg = session.recv(B) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_id"], header["msg_id"]) self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["buffers"], [b"bar"]) # buffers must support the buffer protocol with pytest.raises(TypeError): session.send(A, msg, ident=b"foo", buffers=[1]) # buffers must be contiguous buf = memoryview(os.urandom(16)) with pytest.raises(ValueError): session.send(A, msg, ident=b"foo", buffers=[buf[::2]]) A.close() B.close() ctx.term() async def test_send(self, session): ctx = zmq.asyncio.Context() A = ctx.socket(zmq.PAIR) B = ctx.socket(zmq.PAIR) A.bind("inproc://test") B.connect("inproc://test") msg = session.msg("execute", content=dict(a=10)) session.send(A, msg, ident=b"foo", buffers=[b"bar"]) ident, msg_list = session.feed_identities(await B.recv_multipart()) new_msg = session.deserialize(msg_list) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_id"], msg["msg_id"]) self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) self.assertEqual(new_msg["buffers"], [b"bar"]) content = msg["content"] header = msg["header"] header["msg_id"] = session.msg_id parent = msg["parent_header"] metadata = msg["metadata"] header["msg_type"] session.send( A, None, content=content, parent=parent, header=header, metadata=metadata, ident=b"foo", buffers=[b"bar"], ) ident, msg_list = session.feed_identities(await B.recv_multipart()) new_msg = session.deserialize(msg_list) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_id"], header["msg_id"]) self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["buffers"], [b"bar"]) header["msg_id"] = session.msg_id session.send(A, msg, ident=b"foo", buffers=[b"bar"]) ident, new_msg = session.recv(B) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_id"], header["msg_id"]) self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["buffers"], [b"bar"]) # buffers must support the buffer protocol with pytest.raises(TypeError): session.send(A, msg, ident=b"foo", buffers=[1]) # buffers must be contiguous buf = memoryview(os.urandom(16)) with pytest.raises(ValueError): session.send(A, msg, ident=b"foo", buffers=[buf[::2]]) A.close() B.close() ctx.term() def test_args(self, session): """initialization arguments for Session""" s = session self.assertTrue(s.pack is ss.default_packer) self.assertTrue(s.unpack is ss.default_unpacker) self.assertEqual(s.username, os.environ.get("USER", "username")) s = ss.Session() self.assertEqual(s.username, os.environ.get("USER", "username")) with pytest.raises(TypeError): ss.Session(pack="hi") with pytest.raises(TypeError): ss.Session(unpack="hi") u = str(uuid.uuid4()) s = ss.Session(username="carrot", session=u) self.assertEqual(s.session, u) self.assertEqual(s.username, "carrot") @pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="Test fails on PyPy") def test_tracking_sync(self, session): """test tracking messages""" ctx = zmq.Context() a = ctx.socket(zmq.PAIR) b = ctx.socket(zmq.PAIR) a.bind("inproc://test") b.connect("inproc://test") s = session s.copy_threshold = 1 loop = ioloop.IOLoop(make_current=False) ZMQStream(a, io_loop=loop) msg = s.send(a, "hello", track=False) self.assertTrue(msg["tracker"] is ss.DONE) msg = s.send(a, "hello", track=True) self.assertTrue(isinstance(msg["tracker"], zmq.MessageTracker)) M = zmq.Message(b"hi there", track=True) msg = s.send(a, "hello", buffers=[M], track=True) t = msg["tracker"] self.assertTrue(isinstance(t, zmq.MessageTracker)) with pytest.raises(zmq.NotDone): t.wait(0.1) del M with pytest.raises(zmq.NotDone): t.wait(1) # this will raise a.close() b.close() ctx.term() @pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="Test fails on PyPy") async def test_tracking(self, session): """test tracking messages""" ctx = zmq.asyncio.Context() a = ctx.socket(zmq.PAIR) b = ctx.socket(zmq.PAIR) a.bind("inproc://test") b.connect("inproc://test") s = session s.copy_threshold = 1 loop = ioloop.IOLoop(make_current=False) msg = s.send(a, "hello", track=False) self.assertTrue(msg["tracker"] is ss.DONE) msg = s.send(a, "hello", track=True) self.assertTrue(isinstance(msg["tracker"], zmq.MessageTracker)) M = zmq.Message(b"hi there", track=True) msg = s.send(a, "hello", buffers=[M], track=True) t = msg["tracker"] self.assertTrue(isinstance(t, zmq.MessageTracker)) with pytest.raises(zmq.NotDone): t.wait(0.1) del M with pytest.raises(zmq.NotDone): t.wait(1) # this will raise a.close() b.close() ctx.term() def test_unique_msg_ids(self, session): """test that messages receive unique ids""" ids = set() for i in range(2**12): h = session.msg_header("test") msg_id = h["msg_id"] self.assertTrue(msg_id not in ids) ids.add(msg_id) def test_feed_identities(self, session): """scrub the front for zmq IDENTITIES""" content = dict(code="whoda", stuff=object()) session.msg("execute", content=content) def test_session_id(self): session = ss.Session() # get bs before us bs = session.bsession us = session.session self.assertEqual(us.encode("ascii"), bs) session = ss.Session() # get us before bs us = session.session bs = session.bsession self.assertEqual(us.encode("ascii"), bs) # change propagates: session.session = "something else" bs = session.bsession us = session.session self.assertEqual(us.encode("ascii"), bs) session = ss.Session(session="stuff") # get us before bs self.assertEqual(session.bsession, session.session.encode("ascii")) self.assertEqual(b"stuff", session.bsession) def test_zero_digest_history(self): session = ss.Session(digest_history_size=0) for i in range(11): session._add_digest(uuid.uuid4().bytes) self.assertEqual(len(session.digest_history), 0) def test_cull_digest_history(self): session = ss.Session(digest_history_size=100) for i in range(100): session._add_digest(uuid.uuid4().bytes) self.assertTrue(len(session.digest_history) == 100) session._add_digest(uuid.uuid4().bytes) self.assertTrue(len(session.digest_history) == 91) for i in range(9): session._add_digest(uuid.uuid4().bytes) self.assertTrue(len(session.digest_history) == 100) session._add_digest(uuid.uuid4().bytes) self.assertTrue(len(session.digest_history) == 91) def assertIn(self, a, b): assert a in b def test_bad_pack(self): try: ss.Session(pack=_bad_packer) except ValueError as e: self.assertIn("could not serialize", str(e)) self.assertIn("don't work", str(e)) else: raise ValueError("Should have raised ValueError") def test_bad_unpack(self): try: ss.Session(unpack=_bad_unpacker) except ValueError as e: self.assertIn("could not handle output", str(e)) self.assertIn("don't work either", str(e)) else: raise ValueError("Should have raised ValueError") def test_bad_packer(self): try: ss.Session(packer=__name__ + "._bad_packer") except ValueError as e: self.assertIn("could not serialize", str(e)) self.assertIn("don't work", str(e)) else: raise ValueError("Should have raised ValueError") def test_bad_unpacker(self): try: ss.Session(unpacker=__name__ + "._bad_unpacker") except ValueError as e: self.assertIn("could not handle output", str(e)) self.assertIn("don't work either", str(e)) else: raise ValueError("Should have raised ValueError") def test_bad_roundtrip(self): with pytest.raises(ValueError): ss.Session(unpack=lambda b: 5) def _datetime_test(self, session): content = dict(t=ss.utcnow()) metadata = dict(t=ss.utcnow()) p = session.msg("msg") msg = session.msg("msg", content=content, metadata=metadata, parent=p["header"]) smsg = session.serialize(msg) msg2 = session.deserialize(session.feed_identities(smsg)[1]) assert isinstance(msg2["header"]["date"], datetime) self.assertEqual(msg["header"], msg2["header"]) self.assertEqual(msg["parent_header"], msg2["parent_header"]) self.assertEqual(msg["parent_header"], msg2["parent_header"]) assert isinstance(msg["content"]["t"], datetime) assert isinstance(msg["metadata"]["t"], datetime) assert isinstance(msg2["content"]["t"], str) assert isinstance(msg2["metadata"]["t"], str) self.assertEqual(msg["content"], jsonutil.extract_dates(msg2["content"])) self.assertEqual(msg["content"], jsonutil.extract_dates(msg2["content"])) def test_datetimes(self, session): self._datetime_test(session) def test_datetimes_pickle(self): session = ss.Session(packer="pickle") self._datetime_test(session) def test_datetimes_msgpack(self): msgpack = pytest.importorskip("msgpack") session = ss.Session( pack=msgpack.packb, unpack=lambda buf: msgpack.unpackb(buf, raw=False), ) self._datetime_test(session) def test_send_raw_sync(self, session): ctx = zmq.Context() A = ctx.socket(zmq.PAIR) B = ctx.socket(zmq.PAIR) A.bind("inproc://test") B.connect("inproc://test") msg = session.msg("execute", content=dict(a=10)) msg_list = [ session.pack(msg[part]) for part in ["header", "parent_header", "metadata", "content"] ] session.send_raw(A, msg_list, ident=b"foo") ident, new_msg_list = session.feed_identities(B.recv_multipart()) new_msg = session.deserialize(new_msg_list) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) A.close() B.close() ctx.term() async def test_send_raw(self, session): ctx = zmq.asyncio.Context() A = ctx.socket(zmq.PAIR) B = ctx.socket(zmq.PAIR) A.bind("inproc://test") B.connect("inproc://test") msg = session.msg("execute", content=dict(a=10)) msg_list = [ session.pack(msg[part]) for part in ["header", "parent_header", "metadata", "content"] ] session.send_raw(A, msg_list, ident=b"foo") ident, new_msg_list = session.feed_identities(B.recv_multipart().result()) # type:ignore new_msg = session.deserialize(new_msg_list) self.assertEqual(ident[0], b"foo") self.assertEqual(new_msg["msg_type"], msg["msg_type"]) self.assertEqual(new_msg["header"], msg["header"]) self.assertEqual(new_msg["parent_header"], msg["parent_header"]) self.assertEqual(new_msg["content"], msg["content"]) self.assertEqual(new_msg["metadata"], msg["metadata"]) A.close() B.close() ctx.term() def test_set_packer(self, session): s = session s.packer = "json" s.unpacker = "json" def test_clone(self, session): s = session s._add_digest("initial") s2 = s.clone() assert s2.session == s.session assert s2.digest_history == s.digest_history assert s2.digest_history is not s.digest_history digest = "abcdef" s._add_digest(digest) assert digest in s.digest_history assert digest not in s2.digest_history def test_squash_unicode(): assert ss.squash_unicode(dict(a="1")) == {b"a": b"1"} assert ss.squash_unicode(["a", 1]) == [b"a", 1] assert ss.squash_unicode("hi") == b"hi" def test_json_packer(): ss.json_packer(dict(a=1)) with pytest.raises(ValueError): ss.json_packer(dict(a=ss.Session())) ss.json_packer(dict(a=datetime(2021, 4, 1, 12, tzinfo=tzlocal()))) with warnings.catch_warnings(): warnings.simplefilter("ignore") ss.json_packer(dict(a=math.inf)) def test_message_cls(): m = ss.Message(dict(a=1)) foo = dict(m) # type:ignore assert foo["a"] == 1 assert m["a"] == 1, m["a"] assert "a" in m assert str(m) == "{'a': 1}" def test_session_factory(): s = ss.SessionFactory() s.log.info(str(s.context)) s.context.destroy() jupyter_client-8.6.2/tests/test_ssh.py000066400000000000000000000006571462351563100201550ustar00rootroot00000000000000import socket import pytest from jupyter_client.ssh.tunnel import open_tunnel, select_random_ports def test_random_ports(): for _ in range(4096): ports = select_random_ports(10) assert len(ports) == 10 for p in ports: assert ports.count(p) == 1 def test_open_tunnel(): with pytest.raises((RuntimeError, socket.error)): open_tunnel("tcp://localhost:1234", "does.not.exist") jupyter_client-8.6.2/tests/utils.py000066400000000000000000000150251462351563100174540ustar00rootroot00000000000000"""Testing utils for jupyter_client tests """ import json import os import sys from typing import Dict import pytest from jupyter_client import ( AsyncKernelManager, AsyncMultiKernelManager, KernelManager, MultiKernelManager, ) pjoin = os.path.join skip_win32 = pytest.mark.skipif(sys.platform.startswith("win"), reason="Windows") sample_kernel_json = { "argv": ["cat", "{connection_file}"], "display_name": "Test kernel", } def install_kernel(kernels_dir, argv=None, name="test", display_name=None): """install a kernel in a kernels directory""" kernel_dir = pjoin(kernels_dir, name) os.makedirs(kernel_dir) kernel_json = { "argv": argv or sample_kernel_json["argv"], "display_name": display_name or sample_kernel_json["display_name"], } json_file = pjoin(kernel_dir, "kernel.json") with open(json_file, "w") as f: json.dump(kernel_json, f) return kernel_dir class RecordCallMixin: method_calls: Dict[str, int] def __init__(self, **kwargs): super().__init__(**kwargs) self.method_calls = {} def record(self, method_name: str) -> None: if method_name not in self.method_calls: self.method_calls[method_name] = 0 self.method_calls[method_name] += 1 def call_count(self, method_name: str) -> int: if method_name not in self.method_calls: self.method_calls[method_name] = 0 return self.method_calls[method_name] def reset_counts(self) -> None: for record in self.method_calls: self.method_calls[record] = 0 def subclass_recorder(f): def wrapped(self, *args, **kwargs): # record this call self.record(f.__name__) method = getattr(self._superclass, f.__name__) # call the superclass method r = method(self, *args, **kwargs) # call anything defined in the actual class method f(self, *args, **kwargs) return r return wrapped class KMSubclass(RecordCallMixin): @subclass_recorder def start_kernel(self, **kw): """Record call and defer to superclass""" @subclass_recorder def shutdown_kernel(self, now=False, restart=False): """Record call and defer to superclass""" @subclass_recorder def _async_shutdown_kernel(self, now=False, restart=False): """Record call and defer to superclass""" @subclass_recorder def restart_kernel(self, now=False, **kw): """Record call and defer to superclass""" @subclass_recorder def interrupt_kernel(self): """Record call and defer to superclass""" @subclass_recorder def _async_interrupt_kernel(self): """Record call and defer to superclass""" @subclass_recorder def request_shutdown(self, restart=False): """Record call and defer to superclass""" @subclass_recorder def finish_shutdown(self, waittime=None, pollinterval=0.1, restart=False): """Record call and defer to superclass""" @subclass_recorder def _async_launch_kernel(self, kernel_cmd, **kw): """Record call and defer to superclass""" @subclass_recorder def _async_kill_kernel(self): """Record call and defer to superclass""" @subclass_recorder def cleanup_resources(self, restart=False): """Record call and defer to superclass""" @subclass_recorder def _async_cleanup_resources(self, restart=False): """Record call and defer to superclass""" @subclass_recorder def signal_kernel(self, signum): """Record call and defer to superclass""" @subclass_recorder def _async_signal_kernel(self, signum): """Record call and defer to superclass""" @subclass_recorder def is_alive(self): """Record call and defer to superclass""" @subclass_recorder def _async_is_alive(self): """Record call and defer to superclass""" @subclass_recorder def _async_send_kernel_sigterm(self, restart=False): """Record call and defer to superclass""" class SyncKMSubclass(KMSubclass, KernelManager): """Used to test subclass hierarchies to ensure methods are called when expected.""" _superclass = KernelManager class AsyncKMSubclass(KMSubclass, AsyncKernelManager): """Used to test subclass hierarchies to ensure methods are called when expected.""" _superclass = AsyncKernelManager class MKMSubclass(RecordCallMixin): def _kernel_manager_class_default(self): return "tests.utils.SyncKMSubclass" @subclass_recorder def get_kernel(self, kernel_id): """Record call and defer to superclass""" @subclass_recorder def remove_kernel(self, kernel_id): """Record call and defer to superclass""" @subclass_recorder def start_kernel(self, *, kernel_name=None, **kwargs): """Record call and defer to superclass""" @subclass_recorder def _async_start_kernel(self, *, kernel_name=None, **kwargs): """Record call and defer to superclass""" @subclass_recorder def shutdown_kernel(self, kernel_id, now=False, restart=False): """Record call and defer to superclass""" @subclass_recorder def restart_kernel(self, kernel_id, now=False): """Record call and defer to superclass""" @subclass_recorder def interrupt_kernel(self, kernel_id): """Record call and defer to superclass""" @subclass_recorder def request_shutdown(self, kernel_id, restart=False): """Record call and defer to superclass""" @subclass_recorder def _async_request_shutdown(self, kernel_id, restart=False): """Record call and defer to superclass""" @subclass_recorder def finish_shutdown(self, kernel_id, waittime=None, pollinterval=0.1, restart=False): """Record call and defer to superclass""" @subclass_recorder def _async_finish_shutdown(self, kernel_id, waittime=None, pollinterval=0.1, restart=False): """Record call and defer to superclass""" @subclass_recorder def cleanup_resources(self, kernel_id, restart=False): """Record call and defer to superclass""" @subclass_recorder def shutdown_all(self, now=False): """Record call and defer to superclass""" class SyncMKMSubclass(MKMSubclass, MultiKernelManager): _superclass = MultiKernelManager def _kernel_manager_class_default(self): return "tests.utils.SyncKMSubclass" class AsyncMKMSubclass(MKMSubclass, AsyncMultiKernelManager): _superclass = AsyncMultiKernelManager def _kernel_manager_class_default(self): return "tests.utils.AsyncKMSubclass"