==> ipykernel-6.7.0/.coveragerc <==
[run]
omit = ipykernel/tests/*

==> ipykernel-6.7.0/.github/workflows/check-release.yml <==
name: Check Release
on:
  push:
    branches: ["master"]
  pull_request:
    branches: ["*"]

jobs:
  check_release:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        group: [check_release, link_check]
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Install Dependencies
        run: |
          pip install -e .
      - name: Check Release
        if: ${{ matrix.group == 'check_release' }}
        uses: jupyter-server/jupyter_releaser/.github/actions/check-release@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run Link Check
        if: ${{ matrix.group == 'link_check' }}
        uses: jupyter-server/jupyter_releaser/.github/actions/check-links@v1

==> ipykernel-6.7.0/.github/workflows/ci.yml <==
name: ipykernel tests
on:
  push:
    branches: "master"
  pull_request:
    branches: "*"

jobs:
  build:
    runs-on: ${{ matrix.os }}-latest
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu, macos, windows]
        python-version: ['3.7', '3.8', '3.9', '3.10', 'pypy-3.7']
        exclude:
          - os: windows
            python-version: pypy-3.7
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Install the Python dependencies
        run: |
          pip install --pre --upgrade --upgrade-strategy=eager .[test] codecov
      - name: Install matplotlib
        if: ${{ matrix.os != 'macos' && matrix.python-version != 'pypy3' }}
        run: |
          pip install matplotlib || echo 'failed to install matplotlib'
      - name: Install alternate event loops
        if: ${{ matrix.os != 'windows' }}
        run: |
          pip install curio || echo 'ignoring curio install failure'
          pip install trio || echo 'ignoring trio install failure'
      - name: List installed packages
        run: |
          pip uninstall pipx -y
          pip install pipdeptree
          pipdeptree
          pipdeptree --reverse
          pip freeze
          pip check
      - name: Run the tests
        timeout-minutes: 10
        if: ${{ !startsWith( matrix.python-version, 'pypy' ) }}
        run: |
          pytest ipykernel -vv -s --cov ipykernel --cov-branch --cov-report term-missing:skip-covered --durations 10
      - name: Run the tests on pypy
        timeout-minutes: 15
        if: ${{ startsWith( matrix.python-version, 'pypy' ) }}
        run: |
          pytest -vv ipykernel
      - name: Build the docs
        if: ${{ matrix.os == 'ubuntu' && matrix.python-version == '3.9' }}
        run: |
          cd docs
          pip install -r requirements.txt
          make html
      - name: Coverage
        run: |
          codecov

  check_docstrings:
    runs-on: ${{ matrix.os }}-latest
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu]
        python-version: ['3.9']
        exclude:
          - os: windows
            python-version: pypy3
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Install the Python dependencies
        run: |
          pip install --pre --upgrade --upgrade-strategy=eager .
          pip install velin
      - name: Check Docstrings
        run: |
          velin . --check --compact

  test_without_debugpy:
    runs-on: ${{ matrix.os }}-latest
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu]
        python-version: ['3.9']
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Install the Python dependencies without debugpy
        run: |
          pip install --pre --upgrade --upgrade-strategy=eager .[test]
          pip uninstall --yes debugpy
      - name: List installed packages
        run: |
          pip freeze
      - name: Run the tests
        timeout-minutes: 10
        run: |
          pytest ipykernel -vv -s --durations 10

==> ipykernel-6.7.0/.github/workflows/downstream.yml <==
name: Test downstream projects
on:
  push:
    branches: "master"
  pull_request:
    branches: "*"

jobs:
  downstream1:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Test nbclient
        uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
        with:
          package_name: nbclient
          env_values: IPYKERNEL_CELL_NAME=\
      - name: Test jupyter_client
        uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
        with:
          package_name: jupyter_client

  downstream2:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Test ipyparallel
        uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
        with:
          package_name: ipyparallel
      - name: Test jupyter_kernel_test
        run: |
          git clone https://github.com/jupyter/jupyter_kernel_test.git
          cd jupyter_kernel_test
          pip install -e ".[test]"
          python test_ipykernel.py

==> ipykernel-6.7.0/.github/workflows/enforce-label.yml <==
name: Enforce PR label
on:
  pull_request:
    types: [labeled, unlabeled, opened, edited, synchronize]

jobs:
  enforce-label:
    runs-on: ubuntu-latest
    steps:
      - name: enforce-triage-label
        uses: jupyterlab/maintainer-tools/.github/actions/enforce-label@v1

==> ipykernel-6.7.0/.gitignore <==
MANIFEST
build
cover
dist
_build
docs/man/*.gz
docs/source/api/generated
docs/source/config/options
docs/source/interactive/magics-generated.txt
docs/gh-pages
IPython/html/notebook/static/mathjax
IPython/html/static/style/*.map
*.py[co]
__pycache__
*.egg-info
*~
*.bak
.ipynb_checkpoints
.tox
.DS_Store
\#*#
.#*
.coverage
data_kernelspec
.pytest_cache

# copied changelog file
docs/changelog.md

==> ipykernel-6.7.0/.mailmap <==
A. J. Holyoake ajholyoake
Aaron Culich Aaron Culich
Aron Ahmadia ahmadia
Benjamin Ragan-Kelley Benjamin Ragan-Kelley
Benjamin Ragan-Kelley Min RK
Benjamin Ragan-Kelley MinRK
Barry Wark Barry Wark
Ben Edwards Ben Edwards
Bradley M. Froehle Bradley M. Froehle
Bradley M. Froehle Bradley Froehle
Brandon Parsons Brandon Parsons
Brian E. Granger Brian Granger
Brian E. Granger Brian Granger <>
Brian E. Granger bgranger <>
Brian E. Granger bgranger
Christoph Gohlke cgohlke
Cyrille Rossant rossant
Damián Avila damianavila
Damián Avila damianavila
Damon Allen damontallen
Darren Dale darren.dale <>
Darren Dale Darren Dale <>
Dav Clark Dav Clark <>
Dav Clark Dav Clark
David Hirschfeld dhirschfeld
David P. Sanders David P. Sanders
David Warde-Farley David Warde-Farley <>
Doug Blank Doug Blank
Eugene Van den Bulke Eugene Van den Bulke
Evan Patterson Evan Patterson
Evan Patterson Evan Patterson
Evan Patterson epatters
Evan Patterson epatters
Ernie French Ernie French
Ernie French ernie french
Ernie French ernop
Fernando Perez Fernando Perez
Fernando Perez fperez <>
Fernando Perez fptest <>
Fernando Perez fptest1 <>
Fernando Perez Fernando Perez
Fernando Perez Fernando Perez <>
Fernando Perez Fernando Perez
Frank Murphy Frank Murphy
Gabriel Becker gmbecker
Gael Varoquaux gael.varoquaux <>
Gael Varoquaux gvaroquaux
Gael Varoquaux Gael Varoquaux <>
Ingolf Becker watercrossing
Jake Vanderplas Jake Vanderplas
Jakob Gager jakobgager
Jakob Gager jakobgager
Jakob Gager jakobgager
Jason Grout Jason Grout
Jason Gors jason gors
Jason Gors jgors
Jens Hedegaard Nielsen Jens Hedegaard Nielsen
Jens Hedegaard Nielsen Jens H Nielsen
Jens Hedegaard Nielsen Jens H. Nielsen
Jez Ng Jez Ng
Jonathan Frederic Jonathan Frederic
Jonathan Frederic Jonathan Frederic
Jonathan Frederic Jonathan Frederic
Jonathan Frederic Jonathan Frederic
Jonathan Frederic jon
Jonathan Frederic U-Jon-PC\Jon
Jonathan March Jonathan March
Jonathan March jdmarch
Jörgen Stenarson Jörgen Stenarson
Jörgen Stenarson Jorgen Stenarson
Jörgen Stenarson Jorgen Stenarson <>
Jörgen Stenarson jstenar
Jörgen Stenarson jstenar <>
Jörgen Stenarson Jörgen Stenarson
Juergen Hasch juhasch
Juergen Hasch juhasch
Julia Evans Julia Evans
Kester Tong KesterTong
Kyle Kelley Kyle Kelley
Kyle Kelley rgbkrk
Laurent Dufréchou Laurent Dufréchou
Laurent Dufréchou laurent dufrechou <>
Laurent Dufréchou laurent.dufrechou <>
Laurent Dufréchou Laurent Dufrechou <>
Laurent Dufréchou laurent.dufrechou@gmail.com <>
Laurent Dufréchou ldufrechou
Lorena Pantano Lorena
Luis Pedro Coelho Luis Pedro Coelho
Marc Molla marcmolla
Martín Gaitán Martín Gaitán
Matthias Bussonnier Matthias BUSSONNIER
Matthias Bussonnier Bussonnier Matthias
Matthias Bussonnier Matthias BUSSONNIER
Matthias Bussonnier Matthias Bussonnier
Michael Droettboom Michael Droettboom
Nicholas Bollweg Nicholas Bollweg (Nick)
Nicolas Rougier
Nikolay Koldunov Nikolay Koldunov
Omar Andrés Zapata Mesa Omar Andres Zapata Mesa
Omar Andrés Zapata Mesa Omar Andres Zapata Mesa
Pankaj Pandey Pankaj Pandey
Pascal Schetelat pascal-schetelat
Paul Ivanov Paul Ivanov
Pauli Virtanen Pauli Virtanen <>
Pauli Virtanen Pauli Virtanen
Pierre Gerold Pierre Gerold
Pietro Berkes Pietro Berkes
Piti Ongmongkolkul piti118
Prabhu Ramachandran Prabhu Ramachandran <>
Puneeth Chaganti Puneeth Chaganti
Robert Kern rkern <>
Robert Kern Robert Kern
Robert Kern Robert Kern
Robert Kern Robert Kern <>
Robert Marchman Robert Marchman
Satrajit Ghosh Satrajit Ghosh
Satrajit Ghosh Satrajit Ghosh
Scott Sanderson Scott Sanderson
smithj1 smithj1
smithj1 smithj1
Steven Johnson stevenJohnson
Steven Silvester blink1073
S. Weber s8weber
Stefan van der Walt Stefan van der Walt
Silvia Vinyes Silvia
Silvia Vinyes silviav12
Sylvain Corlay Sylvain Corlay
Sylvain Corlay sylvain.corlay
Ted Drain TD22057
Théophile Studer Théophile Studer
Thomas Kluyver Thomas
Thomas Spura Thomas Spura
Timo Paulssen timo
vds vds2212
vds vds
Ville M. Vainio
Ville M. Vainio ville
Ville M. Vainio ville
Ville M. Vainio vivainio <>
Ville M. Vainio Ville M. Vainio
Ville M. Vainio Ville M. Vainio
Walter Doerwald walter.doerwald <>
Walter Doerwald Walter Doerwald <>
W. Trevor King W. Trevor King
Yoval P. y-p
==> ipykernel-6.7.0/CHANGELOG.md <==
# Changes in IPython kernel

## 6.7.0

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.6.1...0be80cbc81927f4fb20343840bf5834b48884717))

### Enhancements made

- Add usage_request and usage_reply based on psutil [#805](https://github.com/ipython/ipykernel/pull/805) ([@echarles](https://github.com/echarles)) (see the sketch below)
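As a rough illustration of how a frontend might exercise the new message pair: only the `usage_request`/`usage_reply` names come from #805; the connection-file path below is a placeholder, and the rest is ordinary `jupyter_client` usage, not an API documented by this release.

```python
# Hedged sketch: poll the kernel's resource usage over the control channel.
# Assumes a running kernel and jupyter_client; "kernel-12345.json" is made up.
from jupyter_client import BlockingKernelClient

client = BlockingKernelClient(connection_file="kernel-12345.json")
client.load_connection_file()
client.start_channels()

# Build and send a usage_request, then wait for the usage_reply.
msg = client.session.msg("usage_request", content={})
client.control_channel.send(msg)
reply = client.control_channel.get_msg(timeout=5)
print(reply["content"])  # psutil-derived figures per #805 (exact fields not shown here)
```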
### Bugs fixed

- Removed DebugStdLib from arguments of attach [#839](https://github.com/ipython/ipykernel/pull/839) ([@JohanMabille](https://github.com/JohanMabille))
- Normalize debugger temp file paths on Windows [#838](https://github.com/ipython/ipykernel/pull/838) ([@kycutler](https://github.com/kycutler))
- Breakpoint in cell with leading empty lines may be ignored [#829](https://github.com/ipython/ipykernel/pull/829) ([@fcollonval](https://github.com/fcollonval))

### Maintenance and upkeep improvements

- Skip on PyPy, seem to fail. [#837](https://github.com/ipython/ipykernel/pull/837) ([@Carreau](https://github.com/Carreau))
- Remove pipx to fix conflicts [#835](https://github.com/ipython/ipykernel/pull/835) ([@Carreau](https://github.com/Carreau))
- Remove impossible skipif. [#834](https://github.com/ipython/ipykernel/pull/834) ([@Carreau](https://github.com/Carreau))

### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2022-01-03&to=2022-01-13&type=c))

[@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2022-01-03..2022-01-13&type=Issues) | [@echarles](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aecharles+updated%3A2022-01-03..2022-01-13&type=Issues) | [@fcollonval](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Afcollonval+updated%3A2022-01-03..2022-01-13&type=Issues) | [@JohanMabille](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AJohanMabille+updated%3A2022-01-03..2022-01-13&type=Issues) | [@kycutler](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Akycutler+updated%3A2022-01-03..2022-01-13&type=Issues)

## 6.6.1

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.6.0...bdce14b32ca8cc8f4b1635ea47200f0828ec1e05))

### Bugs fixed

- PR: do_one_iteration is a coroutine [#830](https://github.com/ipython/ipykernel/pull/830) ([@impact27](https://github.com/impact27))

### Maintenance and upkeep improvements

- Clean python 2 artifacts. Fix #826 [#827](https://github.com/ipython/ipykernel/pull/827) ([@penguinolog](https://github.com/penguinolog))

### Documentation improvements

- Fix title position in changelog [#828](https://github.com/ipython/ipykernel/pull/828) ([@fcollonval](https://github.com/fcollonval))

### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-12-01&to=2022-01-03&type=c))

[@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2021-12-01..2022-01-03&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Accordoba12+updated%3A2021-12-01..2022-01-03&type=Issues) | [@fcollonval](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Afcollonval+updated%3A2021-12-01..2022-01-03&type=Issues) | [@impact27](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aimpact27+updated%3A2021-12-01..2022-01-03&type=Issues) | [@ivanov](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aivanov+updated%3A2021-12-01..2022-01-03&type=Issues) | [@penguinolog](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Apenguinolog+updated%3A2021-12-01..2022-01-03&type=Issues)

## 6.6.0

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.5.1...9566304175d844c23a1f2b1d70c10df475ed2868))

### Enhancements made

- Set `debugOptions` for breakpoints in python standard library source [#812](https://github.com/ipython/ipykernel/pull/812) ([@echarles](https://github.com/echarles))
- Send `omit_sections` to IPython to choose which sections of documentation you do not want [#809](https://github.com/ipython/ipykernel/pull/809) ([@fasiha](https://github.com/fasiha))

### Bugs fixed

- Added missing `exceptionPaths` field to `debugInfo` reply [#814](https://github.com/ipython/ipykernel/pull/814) ([@JohanMabille](https://github.com/JohanMabille))

### Maintenance and upkeep improvements

- Test `jupyter_kernel_test` as downstream [#813](https://github.com/ipython/ipykernel/pull/813) ([@blink1073](https://github.com/blink1073))
- Remove `nose` dependency [#808](https://github.com/ipython/ipykernel/pull/808) ([@Kojoley](https://github.com/Kojoley))
- Add explicit encoding to open calls in debugger [#807](https://github.com/ipython/ipykernel/pull/807) ([@dlukes](https://github.com/dlukes))

### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-11-18&to=2021-12-01&type=c))

[@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2021-11-18..2021-12-01&type=Issues) | [@dlukes](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Adlukes+updated%3A2021-11-18..2021-12-01&type=Issues) | [@echarles](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aecharles+updated%3A2021-11-18..2021-12-01&type=Issues) | [@fasiha](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Afasiha+updated%3A2021-11-18..2021-12-01&type=Issues) | [@JohanMabille](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AJohanMabille+updated%3A2021-11-18..2021-12-01&type=Issues) | [@Kojoley](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AKojoley+updated%3A2021-11-18..2021-12-01&type=Issues)

## 6.5.1

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.5.0...1ef2017781435d54348fbb170b8c5d096e3e1351))

### Bugs fixed

- Fix the temp file name created by the debugger [#801](https://github.com/ipython/ipykernel/pull/801) ([@eastonsuo](https://github.com/eastonsuo))

### Maintenance and upkeep improvements

- Enforce labels on PRs [#803](https://github.com/ipython/ipykernel/pull/803) ([@blink1073](https://github.com/blink1073))
- Unpin `IPython`, and remove some dependencies on it. [#796](https://github.com/ipython/ipykernel/pull/796) ([@Carreau](https://github.com/Carreau))

### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-11-01&to=2021-11-18&type=c))

[@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2021-11-01..2021-11-18&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2021-11-01..2021-11-18&type=Issues) | [@eastonsuo](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aeastonsuo+updated%3A2021-11-01..2021-11-18&type=Issues)

## 6.5.0

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.4.2...e8d4f66e0f65e284aab444c53e9812dbbc814cb2))

### Bugs fixed

- Fix rich variables inspection [#793](https://github.com/ipython/ipykernel/pull/793) ([@fcollonval](https://github.com/fcollonval))
- Do not call `setQuitOnLastWindowClosed()` on a `QCoreApplication` [#791](https://github.com/ipython/ipykernel/pull/791) ([@stukowski](https://github.com/stukowski))

### Maintenance and upkeep improvements

- Drop `ipython_genutils` requirement [#792](https://github.com/ipython/ipykernel/pull/792) ([@penguinolog](https://github.com/penguinolog))

### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-10-20&to=2021-11-01&type=c))

[@ccordoba12](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Accordoba12+updated%3A2021-10-20..2021-11-01&type=Issues) | [@fcollonval](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Afcollonval+updated%3A2021-10-20..2021-11-01&type=Issues) | [@penguinolog](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Apenguinolog+updated%3A2021-10-20..2021-11-01&type=Issues) | [@stukowski](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Astukowski+updated%3A2021-10-20..2021-11-01&type=Issues)

## 6.4.2

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.4.1...231fd3c65f8a15e9e015546c0a6846e22df9ba2a))

### Enhancements made

- Enabled rich rendering of variables in the debugger [#787](https://github.com/ipython/ipykernel/pull/787) ([@JohanMabille](https://github.com/JohanMabille))

### Bugs fixed

- Remove setting of the eventloop function in the InProcessKernel [#781](https://github.com/ipython/ipykernel/pull/781) ([@rayosborn](https://github.com/rayosborn))

### Maintenance and upkeep improvements

- Add python version classifiers [#783](https://github.com/ipython/ipykernel/pull/783) ([@emuccino](https://github.com/emuccino))

### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-09-10&to=2021-10-19&type=c))

[@emuccino](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aemuccino+updated%3A2021-09-10..2021-10-19&type=Issues) | [@JohanMabille](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AJohanMabille+updated%3A2021-09-10..2021-10-19&type=Issues) | [@rayosborn](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Arayosborn+updated%3A2021-09-10..2021-10-19&type=Issues)

## 6.4.1

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.4.0...4da7623c1ae733f32c0792d70e7af283a7b19d22))

### Merged PRs

- debugpy is now a build requirement [#773](https://github.com/ipython/ipykernel/pull/773) ([@minrk](https://github.com/minrk))

### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-09-09&to=2021-09-10&type=c))

[@minrk](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aminrk+updated%3A2021-09-09..2021-09-10&type=Issues)

## 6.4.0

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.3.1...1ba6b48a97877ff7a564af32c531618efb7d2a57))

### Enhancements made

- Make `json_clean` a no-op for `jupyter-client` >= 7 [#708](https://github.com/ipython/ipykernel/pull/708) ([@martinRenou](https://github.com/martinRenou))

### Bugs fixed

- Do not assume kernels have loops [#766](https://github.com/ipython/ipykernel/pull/766) ([@Carreau](https://github.com/Carreau))
- Fix undefined variable [#765](https://github.com/ipython/ipykernel/pull/765) ([@martinRenou](https://github.com/martinRenou))

### Maintenance and upkeep improvements

- Make `ipykernel` work without `debugpy` [#767](https://github.com/ipython/ipykernel/pull/767) ([@frenzymadness](https://github.com/frenzymadness))
- Stop using deprecated `recv_multipart` when using in-process socket. [#762](https://github.com/ipython/ipykernel/pull/762) ([@Carreau](https://github.com/Carreau))
- Update some warnings with instructions and version number. [#761](https://github.com/ipython/ipykernel/pull/761) ([@Carreau](https://github.com/Carreau))

### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-08-31&to=2021-09-09&type=c))

[@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2021-08-31..2021-09-09&type=Issues) | [@frenzymadness](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Afrenzymadness+updated%3A2021-08-31..2021-09-09&type=Issues) | [@martinRenou](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AmartinRenou+updated%3A2021-08-31..2021-09-09&type=Issues) | [@minrk](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aminrk+updated%3A2021-08-31..2021-09-09&type=Issues)

## 6.3

### 6.3.1

([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.3.0...0b4a8eaa080fc11e240ada9c44c95841463da58c))

#### Merged PRs

- Add dependency on IPython genutils. [#756](https://github.com/ipython/ipykernel/pull/756) ([@Carreau](https://github.com/Carreau))

#### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-08-30&to=2021-08-31&type=c))

[@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2021-08-30..2021-08-31&type=Issues)

### 6.3.0

([Full Changelog](https://github.com/ipython/ipykernel/compare/6.2.0...07af2633ca88eda583e13649279a5b98473618a2))

#### Enhancements made

- Add deep variable inspection [#753](https://github.com/ipython/ipykernel/pull/753) ([@JohanMabille](https://github.com/JohanMabille))
- Add `IPKernelApp.capture_fd_output` config to disable FD-level capture [#752](https://github.com/ipython/ipykernel/pull/752) ([@minrk](https://github.com/minrk))

#### Maintenance and upkeep improvements

- Remove more `nose` test references [#750](https://github.com/ipython/ipykernel/pull/750) ([@blink1073](https://github.com/blink1073))
- Remove `nose` `skipIf` in favor of `pytest` [#748](https://github.com/ipython/ipykernel/pull/748) ([@Carreau](https://github.com/Carreau))
- Remove more `nose` [#747](https://github.com/ipython/ipykernel/pull/747) ([@Carreau](https://github.com/Carreau))
- Set up release helper plumbing [#745](https://github.com/ipython/ipykernel/pull/745) ([@afshin](https://github.com/afshin))
- Test downstream projects [#635](https://github.com/ipython/ipykernel/pull/635) ([@davidbrochart](https://github.com/davidbrochart))

#### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-08-16&to=2021-08-30&type=c))

[@afshin](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aafshin+updated%3A2021-08-16..2021-08-30&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2021-08-16..2021-08-30&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2021-08-16..2021-08-30&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Accordoba12+updated%3A2021-08-16..2021-08-30&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Adavidbrochart+updated%3A2021-08-16..2021-08-30&type=Issues) | [@JohanMabille](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AJohanMabille+updated%3A2021-08-16..2021-08-30&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Akevin-bates+updated%3A2021-08-16..2021-08-30&type=Issues) | [@minrk](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aminrk+updated%3A2021-08-16..2021-08-30&type=Issues) | [@SylvainCorlay](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ASylvainCorlay+updated%3A2021-08-16..2021-08-30&type=Issues)

## 6.2

### 6.2.0

#### Enhancements made

- Add Support for Message Based Interrupt [#741](https://github.com/ipython/ipykernel/pull/741) ([@afshin](https://github.com/afshin))

#### Maintenance and upkeep improvements

- Remove some more dependency on nose/iptest [#743](https://github.com/ipython/ipykernel/pull/743) ([@Carreau](https://github.com/Carreau))
- Remove block param from get_msg() [#736](https://github.com/ipython/ipykernel/pull/736) ([@davidbrochart](https://github.com/davidbrochart))

## 6.1

### 6.1.0

#### Enhancements made

- Implemented `richInspectVariable` request handler [#734](https://github.com/ipython/ipykernel/pull/734) ([@JohanMabille](https://github.com/JohanMabille))

#### Maintenance and upkeep improvements

- Bump `importlib-metadata` limit for `python<3.8` [#738](https://github.com/ipython/ipykernel/pull/738) ([@ltalirz](https://github.com/ltalirz))

#### Bug Fixes

- Fix exception raised by `OutStream.write` [#726](https://github.com/ipython/ipykernel/pull/726) ([@SimonKrughoff](https://github.com/SimonKrughoff))

## 6.0

### 6.0.3

- `KernelApp`: rename ports variable to avoid override [#731](https://github.com/ipython/ipykernel/pull/731) ([@amorenoz](https://github.com/amorenoz))

### 6.0.2

#### Bugs fixed

- Add watchfd keyword to InProcessKernel OutStream initialization [#727](https://github.com/ipython/ipykernel/pull/727) ([@rayosborn](https://github.com/rayosborn))
- Fix typo in eventloops.py [#711](https://github.com/ipython/ipykernel/pull/711) ([@selasley](https://github.com/selasley))
- [bugfix] fix in setup.py (comma before appnope) [#709](https://github.com/ipython/ipykernel/pull/709) ([@jstriebel](https://github.com/jstriebel))

#### Maintenance and upkeep improvements

- Add upper bound to dependency versions. [#714](https://github.com/ipython/ipykernel/pull/714) ([@martinRenou](https://github.com/martinRenou))
- Replace non-existing function. [#723](https://github.com/ipython/ipykernel/pull/723) ([@Carreau](https://github.com/Carreau))
- Remove unused variables [#722](https://github.com/ipython/ipykernel/pull/722) ([@Carreau](https://github.com/Carreau))
- Do not use bare except [#721](https://github.com/ipython/ipykernel/pull/721) ([@Carreau](https://github.com/Carreau))
- misc whitespace and line too long [#720](https://github.com/ipython/ipykernel/pull/720) ([@Carreau](https://github.com/Carreau))
- Formatting: remove semicolon [#719](https://github.com/ipython/ipykernel/pull/719) ([@Carreau](https://github.com/Carreau))
- Clean most flake8 unused import warnings. [#718](https://github.com/ipython/ipykernel/pull/718) ([@Carreau](https://github.com/Carreau))
- Minimal flake8 config [#717](https://github.com/ipython/ipykernel/pull/717) ([@Carreau](https://github.com/Carreau))
- Remove CachingCompiler's filename_mapper [#710](https://github.com/ipython/ipykernel/pull/710) ([@martinRenou](https://github.com/martinRenou))

### 6.0.1

- Fix Tk and asyncio event loops [#704](https://github.com/ipython/ipykernel/pull/704) ([@ccordoba12](https://github.com/ccordoba12))
- Stringify variables that are not json serializable in inspectVariable [#702](https://github.com/ipython/ipykernel/pull/702) ([@JohanMabille](https://github.com/JohanMabille))

### 6.0.0

([Full Changelog](https://github.com/ipython/ipykernel/compare/aba2179420a3fa81ee6b8a13f928bf9e5ce50716...6d04ad2bdccd0dc0daf20f8d53555174b5fefc7b))

IPykernel 6.0 is the first major release in about two years; it brings a number of improvements, code cleanups, and new features to IPython. You should be able to view all closed issues and merged Pull Requests for this milestone [on GitHub](https://github.com/ipython/ipykernel/issues?q=milestone%3A6.0+is%3Aclosed+). As with any major release, we advise greater care when updating than for a minor release, and we welcome any feedback (~50 pull requests).

IPykernel 6 should contain all changes of the 5.x series, in addition to the following non-exhaustive changes:

- Support for the debugger protocol: when using `JupyterLab`, `RetroLab`, or any other frontend supporting the debugger protocol, you should have access to the debugger functionalities.
- The control channel on IPykernel 6.0 is run in a separate thread; this may change the order in which messages are processed, though the change was necessary to accommodate the debugger.
- We now have a new dependency, `matplotlib-inline`; this helps break the circular dependency between IPython/IPykernel and matplotlib.
- All output to stdout/stderr should now be captured, including that of subprocesses and compiled libraries (BLAS, LAPACK, ...). In the notebook server, some output that previously went only to the notebook logs will now appear both in the logs and in notebook outputs. In terminal frontends such as Jupyter Console or Emacs, this may show up as duplicated output (see the note after this list).
- Coroutines are now native (`async def`) instead of using tornado's `@gen.coroutine`.
- `OutStream`s can now be configured to report `isatty() == True`. While this should make some output nicer (for example, colored), it is likely to break others. Use with care.
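Related to the output-capture bullet above: if FD-level capture is unwanted, a later 6.x release added an explicit opt-out, `IPKernelApp.capture_fd_output` (6.3.0, #752, listed earlier in this changelog). A minimal config sketch, assuming only the standard traitlets config-file mechanism:

```python
# ipython_kernel_config.py -- minimal sketch using the traitlets config system.
# The capture_fd_output switch comes from 6.3.0 (#752); the surrounding
# boilerplate is the standard traitlets config pattern, not ipykernel-specific.
c = get_config()  # noqa: F821  (injected by traitlets when the file is loaded)

# Capture only sys.stdout/sys.stderr at the Python level, leaving OS-level
# file descriptors 1 and 2 untouched.
c.IPKernelApp.capture_fd_output = False
```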
#### New features added

- Implementation of the debugger [#597](https://github.com/ipython/ipykernel/pull/597) ([@JohanMabille](https://github.com/JohanMabille))

#### Enhancements made

- Make the `isatty` method of `OutStream` return `true` [#683](https://github.com/ipython/ipykernel/pull/683) ([@peendebak](https://github.com/peendebak))
- Allow setting cell name [#652](https://github.com/ipython/ipykernel/pull/652) ([@davidbrochart](https://github.com/davidbrochart))
- Try to capture all file descriptor output and err [#630](https://github.com/ipython/ipykernel/pull/630) ([@Carreau](https://github.com/Carreau))
- Implemented `inspectVariables` request [#624](https://github.com/ipython/ipykernel/pull/624) ([@JohanMabille](https://github.com/JohanMabille))
- Specify `ipykernel` in kernelspec [#616](https://github.com/ipython/ipykernel/pull/616) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Use `matplotlib-inline` [#591](https://github.com/ipython/ipykernel/pull/591) ([@martinRenou](https://github.com/martinRenou))
- Run control channel in separate thread [#585](https://github.com/ipython/ipykernel/pull/585) ([@SylvainCorlay](https://github.com/SylvainCorlay))

#### Bugs fixed

- Remove references to deprecated `ipyparallel` [#695](https://github.com/ipython/ipykernel/pull/695) ([@minrk](https://github.com/minrk))
- Return len of item written to `OutStream` [#685](https://github.com/ipython/ipykernel/pull/685) ([@Carreau](https://github.com/Carreau))
- Call metadata methods on abort replies [#684](https://github.com/ipython/ipykernel/pull/684) ([@minrk](https://github.com/minrk))
- Fix keyboard interrupt issue in `dispatch_shell` [#673](https://github.com/ipython/ipykernel/pull/673) ([@marcoamonteiro](https://github.com/marcoamonteiro))
- Update `Trio` mode for compatibility with `Trio >= 0.18.0` [#627](https://github.com/ipython/ipykernel/pull/627) ([@mehaase](https://github.com/mehaase))
- Follow up `DeprecationWarning` Fix [#617](https://github.com/ipython/ipykernel/pull/617) ([@afshin](https://github.com/afshin))
- Flush control stream upon shutdown [#611](https://github.com/ipython/ipykernel/pull/611) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Fix Handling of `shell.should_run_async` [#605](https://github.com/ipython/ipykernel/pull/605) ([@afshin](https://github.com/afshin))
- Decrease lag time for eventloop [#573](https://github.com/ipython/ipykernel/pull/573) ([@impact27](https://github.com/impact27))
- Fix "Socket operation on nonsocket" in downstream `nbclient` test. [#641](https://github.com/ipython/ipykernel/pull/641) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Stop control thread before closing sockets on it [#659](https://github.com/ipython/ipykernel/pull/659) ([@minrk](https://github.com/minrk))
- Fix debugging with native coroutines [#651](https://github.com/ipython/ipykernel/pull/651) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Fixup master build [#649](https://github.com/ipython/ipykernel/pull/649) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Fix parent header retrieval [#639](https://github.com/ipython/ipykernel/pull/639) ([@davidbrochart](https://github.com/davidbrochart))
- Add missing self [#636](https://github.com/ipython/ipykernel/pull/636) ([@Carreau](https://github.com/Carreau))
- Backwards compat with older versions of zmq [#665](https://github.com/ipython/ipykernel/pull/665) ([@mlucool](https://github.com/mlucool))

#### Maintenance and upkeep improvements

- Remove pin on Jedi because that was already fixed in IPython [#692](https://github.com/ipython/ipykernel/pull/692) ([@ccordoba12](https://github.com/ccordoba12))
- Remove deprecated source parameter since 4.0.1 (2015) [#690](https://github.com/ipython/ipykernel/pull/690) ([@Carreau](https://github.com/Carreau))
- Remove deprecated `SocketABC` since 4.5.0 [#689](https://github.com/ipython/ipykernel/pull/689) ([@Carreau](https://github.com/Carreau))
- Remove deprecated profile options of `connect.py` [#688](https://github.com/ipython/ipykernel/pull/688) ([@Carreau](https://github.com/Carreau))
- Remove `ipykernel.codeutil` deprecated since IPykernel 4.3.1 (Feb 2016) [#687](https://github.com/ipython/ipykernel/pull/687) ([@Carreau](https://github.com/Carreau))
- Keep preferring `SelectorEventLoop` on Windows [#669](https://github.com/ipython/ipykernel/pull/669) ([@minrk](https://github.com/minrk))
- Add `Kernel.get_parent` to match `set_parent` [#661](https://github.com/ipython/ipykernel/pull/661) ([@minrk](https://github.com/minrk))
- Flush control queue prior to handling shell messages [#658](https://github.com/ipython/ipykernel/pull/658) ([@minrk](https://github.com/minrk))
- Add `Kernel.get_parent_header` [#657](https://github.com/ipython/ipykernel/pull/657) ([@minrk](https://github.com/minrk))
- Build docs only on Ubuntu: add jobs to check docstring formatting. [#644](https://github.com/ipython/ipykernel/pull/644) ([@Carreau](https://github.com/Carreau))
- Make deprecated `shell_streams` writable [#638](https://github.com/ipython/ipykernel/pull/638) ([@minrk](https://github.com/minrk))
- Use channel `get_msg` helper method [#634](https://github.com/ipython/ipykernel/pull/634) ([@davidbrochart](https://github.com/davidbrochart))
- Use native coroutines instead of tornado coroutines [#632](https://github.com/ipython/ipykernel/pull/632) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Make less use of `ipython_genutils` [#631](https://github.com/ipython/ipykernel/pull/631) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Run GitHub Actions on all branches [#625](https://github.com/ipython/ipykernel/pull/625) ([@afshin](https://github.com/afshin))
- Move Python-specific bits to ipkernel [#610](https://github.com/ipython/ipykernel/pull/610) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Update Python Requirement to 3.7 [#608](https://github.com/ipython/ipykernel/pull/608) ([@afshin](https://github.com/afshin))
- Replace import item from `ipython_genutils` to traitlets. [#601](https://github.com/ipython/ipykernel/pull/601) ([@Carreau](https://github.com/Carreau))
- Some removal of `ipython_genutils.py3compat`. [#600](https://github.com/ipython/ipykernel/pull/600) ([@Carreau](https://github.com/Carreau))
- Fixup `get_parent_header` call [#662](https://github.com/ipython/ipykernel/pull/662) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Update of `ZMQInteractiveshell`. [#643](https://github.com/ipython/ipykernel/pull/643) ([@Carreau](https://github.com/Carreau))
- Removed filtering of stack frames for testing [#633](https://github.com/ipython/ipykernel/pull/633) ([@JohanMabille](https://github.com/JohanMabille))
- Added 'type' field to variables returned by `inspectVariables` request [#628](https://github.com/ipython/ipykernel/pull/628) ([@JohanMabille](https://github.com/JohanMabille))
- Changed default timeout to 0.0 seconds for `stop_on_error_timeout` [#618](https://github.com/ipython/ipykernel/pull/618) ([@MSeal](https://github.com/MSeal))
- Attempt longer timeout [#615](https://github.com/ipython/ipykernel/pull/615) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Clean up release process and add tests [#596](https://github.com/ipython/ipykernel/pull/596) ([@afshin](https://github.com/afshin))
- Kernelspec: ensure path is writable before writing `kernel.json`. [#593](https://github.com/ipython/ipykernel/pull/593) ([@jellelicht](https://github.com/jellelicht))
- Add `configure_inline_support` and call it in the shell [#590](https://github.com/ipython/ipykernel/pull/590) ([@martinRenou](https://github.com/martinRenou))

#### Documentation improvements

- Misc Updates to changelog for 6.0 [#686](https://github.com/ipython/ipykernel/pull/686) ([@Carreau](https://github.com/Carreau))
- Add 5.5.x Changelog entries [#672](https://github.com/ipython/ipykernel/pull/672) ([@blink1073](https://github.com/blink1073))
- Build docs only on ubuntu: add jobs to check docstring formatting. [#644](https://github.com/ipython/ipykernel/pull/644) ([@Carreau](https://github.com/Carreau))
- DOC: Autoreformat all docstrings. [#642](https://github.com/ipython/ipykernel/pull/642) ([@Carreau](https://github.com/Carreau))
- Bump Python to 3.8 in `readthedocs.yml` [#612](https://github.com/ipython/ipykernel/pull/612) ([@minrk](https://github.com/minrk))
- Fix typo [#663](https://github.com/ipython/ipykernel/pull/663) ([@SylvainCorlay](https://github.com/SylvainCorlay))
- Add release note to 5.5.0 about `stop_on_error_timeout` [#613](https://github.com/ipython/ipykernel/pull/613) ([@glentakahashi](https://github.com/glentakahashi))
- Move changelog to standard location [#604](https://github.com/ipython/ipykernel/pull/604) ([@afshin](https://github.com/afshin))
- Add changelog for 5.5 [#594](https://github.com/ipython/ipykernel/pull/594) ([@blink1073](https://github.com/blink1073))
- Change to markdown for changelog [#595](https://github.com/ipython/ipykernel/pull/595) ([@afshin](https://github.com/afshin))

#### Deprecations in 6.0

- `Kernel`s now support only a single shell stream; multiple streams will now be ignored. The attribute `Kernel.shell_streams` (plural) is deprecated in ipykernel 6.0. Use `Kernel.shell_stream` (singular); see the sketch after this list.
- `Kernel._parent_header` is deprecated, even though it was private. Use `.get_parent()` now.
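A rough before/after sketch of those two deprecations, for kernel subclass authors. `kernel` stands in for any `Kernel` instance; only the attribute and method names are taken from the notes above:

```python
# Hypothetical migration sketch for the 6.0 deprecations listed above.

# Before (5.x): plural attribute and a private parent-header field.
stream = kernel.shell_streams[0]  # deprecated: only one shell stream remains
parent = kernel._parent_header    # deprecated private attribute

# After (6.0+): the singular attribute and the public accessor.
stream = kernel.shell_stream
parent = kernel.get_parent()      # the parent *message*; its header, if you
                                  # need it, is parent["header"]
```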
#### Removal in 6.0

- `ipykernel.codeutils` was deprecated since the 4.x series (2016) and has been removed; please import similar functionality from `ipyparallel`.
- Remove `find_connection_file` and the `profile` argument of `connect_qtconsole` and `get_connection_info`, deprecated since IPykernel 4.2.2 (2016).

#### Contributors to this release

([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2021-01-11&to=2021-06-29&type=c))

[@afshin](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aafshin+updated%3A2021-01-11..2021-06-29&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2021-01-11..2021-06-29&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2021-01-11..2021-06-29&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Accordoba12+updated%3A2021-01-11..2021-06-29&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Adavidbrochart+updated%3A2021-01-11..2021-06-29&type=Issues) | [@dsblank](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Adsblank+updated%3A2021-01-11..2021-06-29&type=Issues) | [@glentakahashi](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aglentakahashi+updated%3A2021-01-11..2021-06-29&type=Issues) | [@impact27](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aimpact27+updated%3A2021-01-11..2021-06-29&type=Issues) | [@ivanov](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aivanov+updated%3A2021-01-11..2021-06-29&type=Issues) | [@jellelicht](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ajellelicht+updated%3A2021-01-11..2021-06-29&type=Issues) | [@jkablan](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ajkablan+updated%3A2021-01-11..2021-06-29&type=Issues) | [@JohanMabille](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AJohanMabille+updated%3A2021-01-11..2021-06-29&type=Issues) | [@kevin-bates](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Akevin-bates+updated%3A2021-01-11..2021-06-29&type=Issues) | [@marcoamonteiro](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Amarcoamonteiro+updated%3A2021-01-11..2021-06-29&type=Issues) | [@martinRenou](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AmartinRenou+updated%3A2021-01-11..2021-06-29&type=Issues) | [@mehaase](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Amehaase+updated%3A2021-01-11..2021-06-29&type=Issues) | [@minrk](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aminrk+updated%3A2021-01-11..2021-06-29&type=Issues) | [@mlucool](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Amlucool+updated%3A2021-01-11..2021-06-29&type=Issues) | [@MSeal](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3AMSeal+updated%3A2021-01-11..2021-06-29&type=Issues) | [@peendebak](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Apeendebak+updated%3A2021-01-11..2021-06-29&type=Issues) | [@SylvainCorlay](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ASylvainCorlay+updated%3A2021-01-11..2021-06-29&type=Issues) | [@tacaswell](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Atacaswell+updated%3A2021-01-11..2021-06-29&type=Issues)

## 5.5

### 5.5.5

* Keep preferring SelectorEventLoop on Windows. [#669](https://github.com/ipython/ipykernel/pull/669)

### 5.5.4

* Import `configure_inline_support` from `matplotlib_inline` if available [#654](https://github.com/ipython/ipykernel/pull/654)

### 5.5.3

* Revert Backport of #605: Fix Handling of `shell.should_run_async` [#622](https://github.com/ipython/ipykernel/pull/622)

### 5.5.2

**Note:** This release was deleted from PyPI since it had breaking changes.

* Changed default timeout to 0.0 seconds for stop_on_error_timeout. [#618](https://github.com/ipython/ipykernel/pull/618)

### 5.5.1

**Note:** This release was deleted from PyPI since it had breaking changes.

* Fix Handling of `shell.should_run_async`. [#605](https://github.com/ipython/ipykernel/pull/605)

### 5.5.0

* kernelspec: ensure path is writable before writing `kernel.json`. [#593](https://github.com/ipython/ipykernel/pull/593)
* Add `configure_inline_support` and call it in the shell. [#590](https://github.com/ipython/ipykernel/pull/590)
* Fix `stop_on_error_timeout` to now properly abort `execute_request`'s that fall within the timeout after an error. [#572](https://github.com/ipython/ipykernel/pull/572)

## 5.4

### 5.4.3

* Rework `wait_for_ready` logic. [#578](https://github.com/ipython/ipykernel/pull/578)

### 5.4.2

* Revert "Fix stop_on_error_timeout blocking other messages in queue". [#570](https://github.com/ipython/ipykernel/pull/570)

### 5.4.1

* Invalid syntax in `ipykernel/log.py`. [#567](https://github.com/ipython/ipykernel/pull/567)

### 5.4.0

5.4.0 is generally focused on code quality improvements and tornado asyncio compatibility.

* Add github actions, bail on asyncio patch for tornado 6.1. [#564](https://github.com/ipython/ipykernel/pull/564)
* Start testing on Python 3.9. [#551](https://github.com/ipython/ipykernel/pull/551)
* Fix stack levels for ipykernel's deprecation warnings and stop using some deprecated APIs. [#547](https://github.com/ipython/ipykernel/pull/547)
* Add env parameter to kernel installation [#541](https://github.com/ipython/ipykernel/pull/541)
* Fix stop_on_error_timeout blocking other messages in queue. [#539](https://github.com/ipython/ipykernel/pull/539)
* Remove most of the python 2 compat code. [#537](https://github.com/ipython/ipykernel/pull/537)
* Remove u-prefix from strings. [#538](https://github.com/ipython/ipykernel/pull/538)

## 5.3

### 5.3.4

* Only run Qt eventloop in the shell stream. [#531](https://github.com/ipython/ipykernel/pull/531)

### 5.3.3

* Fix QSocketNotifier in the Qt event loop not being disabled for the control channel. [#525](https://github.com/ipython/ipykernel/pull/525)

### 5.3.2

* Restore timer based event loop as a Windows-compatible fallback. [#523](https://github.com/ipython/ipykernel/pull/523)

### 5.3.1

* Fix #520: run post_execute and post_run_cell on async cells [#521](https://github.com/ipython/ipykernel/pull/521)
* Fix exception causes in zmqshell.py [#516](https://github.com/ipython/ipykernel/pull/516)
* Make pdb on Windows interruptible [#490](https://github.com/ipython/ipykernel/pull/490)

### 5.3.0

5.3.0 adds support for Trio event loops and has some bug fixes.

* Fix ipython display imports [#509](https://github.com/ipython/ipykernel/pull/509)
* Skip test_unc_paths if OS is not Windows [#507](https://github.com/ipython/ipykernel/pull/507)
* Allow interrupting input() on Windows, as part of effort to make pdb interruptible [#498](https://github.com/ipython/ipykernel/pull/498)
* Add Trio Loop [#479](https://github.com/ipython/ipykernel/pull/479)
* Flush from process even without newline [#478](https://github.com/ipython/ipykernel/pull/478)

## 5.2

### 5.2.1

* Handle system commands that use UNC paths on Windows [#500](https://github.com/ipython/ipykernel/pull/500)
* Add offset argument to seek in io test [#496](https://github.com/ipython/ipykernel/pull/496)

### 5.2.0

5.2.0 includes several bugfixes and internal logic improvements.

* Produce better traceback when kernel is interrupted [#491](https://github.com/ipython/ipykernel/pull/491)
* Add `InProcessKernelClient.control_channel` for compatibility with jupyter-client v6.0.0 [#489](https://github.com/ipython/ipykernel/pull/489)
* Drop support for Python 3.4 [#483](https://github.com/ipython/ipykernel/pull/483)
* Work around issue related to Tornado with python3.8 on Windows ([#480](https://github.com/ipython/ipykernel/pull/480), [#481](https://github.com/ipython/ipykernel/pull/481))
* Prevent entering event loop if it is None [#464](https://github.com/ipython/ipykernel/pull/464)
* Use `shell.input_transformer_manager` when available [#411](https://github.com/ipython/ipykernel/pull/411)

## 5.1

### 5.1.4

5.1.4 includes a few bugfixes, especially for compatibility with Python 3.8 on Windows.

* Fix pickle issues when using inline matplotlib backend [#476](https://github.com/ipython/ipykernel/pull/476)
* Fix an error during kernel shutdown [#463](https://github.com/ipython/ipykernel/pull/463)
* Fix compatibility issues with Python 3.8 ([#456](https://github.com/ipython/ipykernel/pull/456), [#461](https://github.com/ipython/ipykernel/pull/461))
* Remove some dead code ([#474](https://github.com/ipython/ipykernel/pull/474), [#467](https://github.com/ipython/ipykernel/pull/467))

### 5.1.3

5.1.3 includes several bugfixes and internal logic improvements.

* Fix comm shutdown behavior by adding a `deleting` option to `close` which can be set to prevent registering new comm channels during shutdown ([#433](https://github.com/ipython/ipykernel/pull/433), [#435](https://github.com/ipython/ipykernel/pull/435))
* Fix `Heartbeat._bind_socket` to return on the first bind ([#431](https://github.com/ipython/ipykernel/pull/431))
* Moved `InProcessKernelClient.flush` to `DummySocket` ([#437](https://github.com/ipython/ipykernel/pull/437))
* Don't redirect stdout if nose machinery is not present ([#427](https://github.com/ipython/ipykernel/pull/427))
* Rename `_asyncio.py` to `_asyncio_utils.py` to avoid name conflicts on Python 3.6+ ([#426](https://github.com/ipython/ipykernel/pull/426))
* Only generate kernelspec when installing or building wheel ([#425](https://github.com/ipython/ipykernel/pull/425))
* Fix priority ordering of control-channel messages in some cases [#443](https://github.com/ipython/ipykernel/pull/443)

### 5.1.2

5.1.2 fixes some socket-binding race conditions that caused testing failures in nbconvert.

* Fix socket-binding race conditions ([#412](https://github.com/ipython/ipykernel/pull/412), [#419](https://github.com/ipython/ipykernel/pull/419))
* Add a no-op `flush` method to `DummySocket` and comply with stream API ([#405](https://github.com/ipython/ipykernel/pull/405))
* Update kernel version to indicate kernel v5.3 support ([#394](https://github.com/ipython/ipykernel/pull/394))
* Add testing for upcoming Python 3.8 and PEP 570 positional parameters ([#396](https://github.com/ipython/ipykernel/pull/396), [#408](https://github.com/ipython/ipykernel/pull/408))

### 5.1.1

5.1.1 fixes a bug that caused cells to get stuck in a busy state.

* Flush after sending replies [#390](https://github.com/ipython/ipykernel/pull/390)

### 5.1.0

5.1.0 fixes some important regressions in 5.0, especially on Windows.

[5.1.0 on GitHub](https://github.com/ipython/ipykernel/milestones/5.1)

* Fix message-ordering bug that could result in out-of-order executions, especially on Windows [#356](https://github.com/ipython/ipykernel/pull/356)
* Fix classifiers to indicate dropped Python 2 support [#354](https://github.com/ipython/ipykernel/pull/354)
* Remove some dead code [#355](https://github.com/ipython/ipykernel/pull/355)
* Support rich-media responses in `inspect_requests` (tooltips) [#361](https://github.com/ipython/ipykernel/pull/361)

## 5.0

### 5.0.0

[5.0.0 on GitHub](https://github.com/ipython/ipykernel/milestones/5.0)

* Drop support for Python 2. `ipykernel` 5.0 requires Python >= 3.4
* Add support for IPython's asynchronous code execution [#323](https://github.com/ipython/ipykernel/pull/323)
* Update release process in `CONTRIBUTING.md` [#339](https://github.com/ipython/ipykernel/pull/339)

## 4.10

[4.10 on GitHub](https://github.com/ipython/ipykernel/milestones/4.10)

* Fix compatibility with IPython 7.0 [#348](https://github.com/ipython/ipykernel/pull/348)
* Fix compatibility in cases where sys.stdout can be None [#344](https://github.com/ipython/ipykernel/pull/344)

## 4.9

### 4.9.0

[4.9.0 on GitHub](https://github.com/ipython/ipykernel/milestones/4.9)

* Python 3.3 is no longer supported [#336](https://github.com/ipython/ipykernel/pull/336)
* Flush stdout/stderr in KernelApp before replacing [#314](https://github.com/ipython/ipykernel/pull/314)
* Allow preserving stdout and stderr in KernelApp [#315](https://github.com/ipython/ipykernel/pull/315)
* Override writable method on OutStream [#316](https://github.com/ipython/ipykernel/pull/316)
* Add metadata to help display matplotlib figures legibly [#336](https://github.com/ipython/ipykernel/pull/336)

## 4.8

### 4.8.2

[4.8.2 on GitHub](https://github.com/ipython/ipykernel/milestones/4.8.2)

* Fix compatibility issue with qt eventloop and pyzmq 17 [#307](https://github.com/ipython/ipykernel/pull/307).

### 4.8.1

[4.8.1 on GitHub](https://github.com/ipython/ipykernel/milestones/4.8.1)

* set zmq.ROUTER_HANDOVER socket option when available to workaround libzmq reconnect bug [#300](https://github.com/ipython/ipykernel/pull/300).
* Fix sdists including absolute paths for kernelspec files, which prevented installation from sdist on Windows [#306](https://github.com/ipython/ipykernel/pull/306).

### 4.8.0

[4.8.0 on GitHub](https://github.com/ipython/ipykernel/milestones/4.8)

* Cleanly shutdown integrated event loops when shutting down the kernel. [#290](https://github.com/ipython/ipykernel/pull/290)
* `%gui qt` now uses Qt 5 by default rather than Qt 4, following a similar change in terminal IPython. [#293](https://github.com/ipython/ipykernel/pull/293)
* Fix event loop integration for `asyncio` when run with Tornado 5, which uses asyncio where available. [#296](https://github.com/ipython/ipykernel/pull/296)

## 4.7

### 4.7.0

[4.7.0 on GitHub](https://github.com/ipython/ipykernel/milestones/4.7)

* Add event loop integration for `asyncio`.
* Use the new IPython completer API.
* Add support for displaying GIF images (mimetype `image/gif`).
* Allow the kernel to be interrupted without killing the Qt console.
* Fix `is_complete` response with cell magics.
* Clean up encoding of bytes objects.
* Clean up help links to use `https` and improve display titles.
* Clean up ioloop handling in preparation for tornado 5.

## 4.6

### 4.6.1

[4.6.1 on GitHub](https://github.com/ipython/ipykernel/milestones/4.6.1)

* Fix eventloop-integration bug preventing Qt windows/widgets from displaying with ipykernel 4.6.0 and IPython ≥ 5.2.
* Avoid deprecation warnings about naive datetimes when working with jupyter_client ≥ 5.0.

### 4.6.0

[4.6.0 on GitHub](https://github.com/ipython/ipykernel/milestones/4.6)

* Add to API `DisplayPublisher.publish` two new fully backward-compatible keyword-args:
  * `update: bool`
  * `transient: dict`
* Support new `transient` key in `display_data` messages spec for `publish`. For a display data message, `transient` contains data that shouldn't be persisted to files or documents. Add a `display_id` to this `transient` dict by `display(obj, display_id=...)`; see the sketch after this list.
* Add `ipykernel_launcher` module which removes the current working directory from `sys.path` before launching the kernel. This helps to reduce the cases where the kernel won't start because there's a `random.py` (or similar) module in the current working directory.
* Add busy/idle messages on IOPub during processing of aborted requests
* Add active event loop setting to GUI, which enables the correct response to IPython's `is_event_loop_running_xxx`
* Include IPython kernelspec in wheels to reduce reliance on "native kernel spec" in jupyter_client
* Modify `OutStream` to inherit from `TextIOBase` instead of object to improve API support and error reporting
* Fix IPython kernel death messages at start, such as "Kernel Restarting..." and "Kernel appears to have died", when parent-poller handles PID 1
* Various bugfixes
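The `display_id` mechanism is easiest to see in action. This is the standard `IPython.display` API and runs in any cell of an IPython kernel session:

```python
from IPython.display import display

# display_id ties this output to a handle that later calls can update in place.
handle = display("working...", display_id="status-line")  # the id is any string

# ...do some work, then re-render the same output area instead of appending:
handle.update("done.")
```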
## 4.5

### 4.5.2

[4.5.2 on GitHub](https://github.com/ipython/ipykernel/milestones/4.5.2)

* Fix bug when instantiating Comms outside of the IPython kernel (introduced in 4.5.1).

### 4.5.1

[4.5.1 on GitHub](https://github.com/ipython/ipykernel/milestones/4.5.1)

* Add missing `stream` parameter to overridden `getpass`
* Remove locks from iopub thread, which could cause deadlocks during debugging
* Fix regression where KeyboardInterrupt was treated as an aborted request, rather than an error
* Allow instantiating Comms outside of the IPython kernel

### 4.5.0

[4.5 on GitHub](https://github.com/ipython/ipykernel/milestones/4.5)

* Use figure.dpi instead of savefig.dpi to set DPI for inline figures
* Support ipympl matplotlib backend (requires IPython update as well to fully work)
* Various bugfixes, including fixes for output coming from threads, and `input` when called with non-string prompts, which stdlib allows.

## 4.4

### 4.4.1

[4.4.1 on GitHub](https://github.com/ipython/ipykernel/milestones/4.4.1)

* Fix circular import of matplotlib on Python 2 caused by the inline backend changes in 4.4.0.

### 4.4.0

[4.4.0 on GitHub](https://github.com/ipython/ipykernel/milestones/4.4)

* Use [MPLBACKEND](http://matplotlib.org/devel/coding_guide.html?highlight=mplbackend#developing-a-new-backend) environment variable to tell matplotlib >= 1.5 to use the inline backend by default. This is only done if MPLBACKEND is not already set and no backend has been explicitly loaded, so setting `MPLBACKEND=Qt4Agg` or calling `%matplotlib notebook` or `matplotlib.use('Agg')` will take precedence (see the sketch after this list).
* Fixes for logging problems caused by 4.3, where logging could go to the terminal instead of the notebook.
* Add `--sys-prefix` and `--profile` arguments to `ipython kernel install`.
* Allow Comm (Widget) messages to be sent from background threads.
* Select inline matplotlib backend by default if `%matplotlib` magic or `matplotlib.use()` are not called explicitly (for matplotlib >= 1.5).
* Fix some longstanding minor deviations from the message protocol (missing status: ok in a few replies, connect_reply format).
* Remove calls to NoOpContext from IPython, deprecated in 5.0.
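The precedence rule in the first 4.4.0 bullet can be checked directly. This is plain matplotlib behavior (matplotlib >= 1.5 reads `MPLBACKEND` on first import), nothing ipykernel-specific beyond what the note says:

```python
import os

# Setting MPLBACKEND before the first matplotlib import overrides the
# inline default that ipykernel would otherwise select.
os.environ["MPLBACKEND"] = "Agg"

import matplotlib
print(matplotlib.get_backend())  # 'agg' (capitalization varies by matplotlib version)
```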
### 4.4.0 [4.4.0 on GitHub](https://github.com/ipython/ipykernel/milestones/4.4) * Use [MPLBACKEND](http://matplotlib.org/devel/coding_guide.html?highlight=mplbackend#developing-a-new-backend) environment variable to tell matplotlib \>= 1.5 use use the inline backend by default. This is only done if MPLBACKEND is not already set and no backend has been explicitly loaded, so setting `MPLBACKEND=Qt4Agg` or calling `%matplotlib notebook` or `matplotlib.use('Agg')` will take precedence. * Fixes for logging problems caused by 4.3, where logging could go to the terminal instead of the notebook. * Add `--sys-prefix` and `--profile` arguments to `ipython kernel install`. * Allow Comm (Widget) messages to be sent from background threads. * Select inline matplotlib backend by default if `%matplotlib` magic or `matplotlib.use()` are not called explicitly (for matplotlib \>= 1.5). * Fix some longstanding minor deviations from the message protocol (missing status: ok in a few replies, connect_reply format). * Remove calls to NoOpContext from IPython, deprecated in 5.0. ## 4.3 ### 4.3.2 * Use a nonempty dummy session key for inprocess kernels to avoid security warnings. ### 4.3.1 * Fix Windows Python 3.5 incompatibility caused by faulthandler patch in 4.3 ### 4.3.0 [4.3.0 on GitHub](https://github.com/ipython/ipykernel/milestones/4.3) * Publish all IO in a thread, via `IOPubThread`. This solves the problem of requiring `sys.stdout.flush` to be called in the notebook to produce output promptly during long-running cells. * Remove references to outdated IPython guiref in kernel banner. * Patch faulthandler to use `sys.__stderr__` instead of forwarded `sys.stderr`, which has no fileno when forwarded. * Deprecate some vestiges of the Big Split: * `ipykernel.find_connection_file` is deprecated. Use `jupyter_client.find_connection_file` instead. \- Various pieces of code specific to IPython parallel are deprecated in ipykernel and moved to ipyparallel. ## 4.2 ### 4.2.2 [4.2.2 on GitHub](https://github.com/ipython/ipykernel/milestones/4.2.2) * Don\'t show interactive debugging info when kernel crashes * Fix handling of numerical types in json_clean * Testing fixes for output capturing ### 4.2.1 [4.2.1 on GitHub](https://github.com/ipython/ipykernel/milestones/4.2.1) * Fix default display name back to \"Python X\" instead of \"pythonX\" ### 4.2.0 [4.2 on GitHub](https://github.com/ipython/ipykernel/milestones/4.2) * Support sending a full message in initial opening of comms (metadata, buffers were not previously allowed) * When using `ipython kernel install --name` to install the IPython kernelspec, default display-name to the same value as `--name`. ## 4.1 ### 4.1.1 [4.1.1 on GitHub](https://github.com/ipython/ipykernel/milestones/4.1.1) * Fix missing `ipykernel.__version__` on Python 2. * Fix missing `target_name` when opening comms from the frontend. ### 4.1.0 [4.1 on GitHub](https://github.com/ipython/ipykernel/milestones/4.1) * add `ipython kernel install` entrypoint for installing the IPython kernelspec * provisional implementation of `comm_info` request/reply for msgspec v5.1 ## 4.0 [4.0 on GitHub](https://github.com/ipython/ipykernel/milestones/4.0) 4.0 is the first release of ipykernel as a standalone package. ipykernel-6.7.0/CONTRIBUTING.md000066400000000000000000000035021417004153500157570ustar00rootroot00000000000000# Contributing Welcome! For contributing tips, follow the [Jupyter Contributing Guide](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html). 
Please make sure to follow the [Jupyter Code of Conduct](https://github.com/jupyter/governance/blob/master/conduct/code_of_conduct.md).

## Installing ipykernel for development

ipykernel is a pure Python package, so setting up for development is the same as most other Python projects:

```bash
# clone the repo
git clone https://github.com/ipython/ipykernel
cd ipykernel
# do a 'development' or 'editable' install with pip:
pip install -e .
```

## Releasing ipykernel

Releasing ipykernel is *almost* standard for a Python package:

- set version for release
- make and publish tag
- publish release to PyPI
- set version back to development

The one extra step for ipykernel is that we need to make separate wheels for Python 2 and 3 because the bundled kernelspec has different contents for Python 2 and 3. This affects only the 4.x branch of ipykernel, as the 5+ version is only compatible with Python 3.

The full release process is available below:

```bash
# make sure version is set in ipykernel/_version.py
VERSION="4.9.0"

# commit the version and make a release tag
git add ipykernel/_version.py
git commit -m "release $VERSION"
git tag -am "release $VERSION" $VERSION

# push the changes to the repo
git push
git push --tags

# publish the release to PyPI
# note the extra `python2 setup.py bdist_wheel` for creating
# the wheel for Python 2
pip install --upgrade twine
git clean -xfd
python3 setup.py sdist bdist_wheel
python2 setup.py bdist_wheel  # the extra step for the 4.x branch.
twine upload dist/*

# set the version back to '.dev' in ipykernel/_version.py
# e.g. 4.10.0.dev if we just released 4.9.0
git add ipykernel/_version.py
git commit -m "back to dev"
git push
```

ipykernel-6.7.0/COPYING.md000066400000000000000000000054231417004153500151640ustar00rootroot00000000000000# Licensing terms

This project is licensed under the terms of the Modified BSD License (also known as New or Revised or 3-Clause BSD), as follows:

- Copyright (c) 2015, IPython Development Team

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

Neither the name of the IPython Development Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

## About the IPython Development Team

The IPython Development Team is the set of all contributors to the IPython project.
This includes all of the IPython subprojects. The core team that coordinates development on GitHub can be found here: https://github.com/ipython/.

## Our Copyright Policy

IPython uses a shared copyright model. Each contributor maintains copyright over their contributions to IPython. But, it is important to note that these contributions are typically only changes to the repositories. Thus, the IPython source code, in its entirety, is not the copyright of any single person or institution. Instead, it is the collective copyright of the entire IPython Development Team. If individual contributors want to maintain a record of what changes/contributions they have specific copyright on, they should indicate their copyright in the commit message of the change, when they commit the change to one of the IPython repositories.

With this in mind, the following banner should be used in any source code file to indicate the copyright and license terms:

    # Copyright (c) IPython Development Team.
    # Distributed under the terms of the Modified BSD License.

ipykernel-6.7.0/MANIFEST.in000066400000000000000000000006631417004153500152690ustar00rootroot00000000000000include *.md
include pyproject.toml

# Documentation
graft docs
exclude docs/\#*

# Examples
graft examples

# docs subdirs we want to skip
prune docs/_build
prune docs/gh-pages
prune docs/dist

# Patterns to exclude from any directory
global-exclude *~
global-exclude *.pyc
global-exclude *.pyo
global-exclude .git
global-exclude .ipynb_checkpoints

prune data_kernelspec
exclude .mailmap
exclude readthedocs.yml
exclude .coveragerc
ipykernel-6.7.0/README.md000066400000000000000000000012211417004153500150060ustar00rootroot00000000000000# IPython Kernel for Jupyter

This package provides the IPython kernel for Jupyter.

## Installation from source

1. `git clone`
2. `cd ipykernel`
3. `pip install -e ".[test]"`

After that, all normal `ipython` commands will use this newly-installed version of the kernel.

## Running tests

Follow the instructions from `Installation from source`, and then run the following from the root directory:

```bash
pytest ipykernel
```

## Running tests with coverage

Follow the instructions from `Installation from source`, and then run the following from the root directory:

```bash
pytest ipykernel -vv -s --cov ipykernel --cov-branch --cov-report term-missing:skip-covered --durations 10
```

ipykernel-6.7.0/RELEASE.md000066400000000000000000000010041417004153500151330ustar00rootroot00000000000000# Release Guide

## Using `jupyter_releaser`

The recommended way to make a release is to use [`jupyter_releaser`](https://github.com/jupyter-server/jupyter_releaser#checklist-for-adoption).

## Manual Release

- Update `CHANGELOG`
- Run the following:

```bash
export VERSION=
pip install jupyter_releaser
tbump --only-patch $VERSION
git commit -a -m "Release $VERSION"
git tag $VERSION; true;
git push --all
git push --tags
rm -rf dist build
python -m build .
twine check dist/*
twine upload dist/*
```

ipykernel-6.7.0/docs/000077500000000000000000000000001417004153500144660ustar00rootroot00000000000000ipykernel-6.7.0/docs/Makefile000066400000000000000000000164151417004153500161330ustar00rootroot00000000000000# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found.
Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  applehelp  to make an Apple Help Book"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
	@echo "  coverage   to run coverage check of the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/IPythonKernel.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/IPythonKernel.qhc"

applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished.
The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/IPythonKernel" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/IPythonKernel" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." ipykernel-6.7.0/docs/conf.py000066400000000000000000000235541417004153500157660ustar00rootroot00000000000000#!/usr/bin/env python3 # # IPython Kernel documentation build configuration file, created by # sphinx-quickstart on Mon Oct 5 11:32:44 2015. 
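# A quick local-build sketch (an assumption, not generated config: it presumes
# Sphinx plus the packages in docs/requirements.txt are installed, and mirrors
# the Makefile targets above):
#
#     pip install sphinx -r requirements.txt
#     make html    # HTML output lands in _build/html
#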
# # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import shutil # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'myst_parser', 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib_github_alt', ] github_project_url = "https://github.com/ipython/ipykernel" # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'IPython Kernel' copyright = '2015, IPython Development Team' author = 'IPython Development Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # version_ns = {} here = os.path.dirname(__file__) version_py = os.path.join(here, os.pardir, 'ipykernel', '_version.py') with open(version_py) as f: exec(compile(f.read(), version_py, 'exec'), version_ns) # The short X.Y version. version = '%i.%i' % version_ns['version_info'][:2] # The full version, including alpha/beta/rc tags. release = version_ns['__version__'] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. default_role = 'literal' # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'ipykerneldoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ipykernel.tex', 'IPython Kernel Documentation',
     'IPython Development Team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'ipykernel', 'IPython Kernel Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ipykernel', 'IPython Kernel Documentation',
     author, 'ipykernel', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'ipython': ('https://ipython.readthedocs.io/en/latest', None),
    'jupyter': ('https://jupyter.readthedocs.io/en/latest', None),
}


def setup(app):
    here = os.path.dirname(os.path.abspath(__file__))
    shutil.copy(os.path.join(here, '..', 'CHANGELOG.md'), 'changelog.md')
ipykernel-6.7.0/docs/index.rst000066400000000000000000000006241417004153500163210ustar00rootroot00000000000000.. _index:

IPython Kernel Docs
===================

This contains minimal version-sensitive documentation for the IPython kernel package.
Most IPython kernel documentation is in the `IPython documentation <https://ipython.readthedocs.io/en/latest/>`_.

Contents:

.. toctree::
   :maxdepth: 2

   changelog

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
ipykernel-6.7.0/docs/make.bat000066400000000000000000000161321417004153500160660ustar00rootroot00000000000000@ECHO OFF

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
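REM Usage sketch (an added note, not generated by sphinx-quickstart): on
REM Windows, "make.bat html" mirrors "make html" from the Makefile and builds
REM the HTML docs into %BUILDDIR%\html; it assumes sphinx-build is on PATH,
REM or falls back to invoking Sphinx via Python as checked below.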
if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 2> nul if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. 
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\IPythonKernel.qhcp
	echo.To view the help file:
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\IPythonKernel.qhc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdf" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdfja" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf-ja
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "texinfo" (
	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
	goto end
)

if "%1" == "gettext" (
	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	if errorlevel 1 exit /b 1
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	if errorlevel 1 exit /b 1
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

if "%1" == "coverage" (
	%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
	goto end
)

if "%1" == "xml" (
	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The XML files are in %BUILDDIR%/xml.
	goto end
)

if "%1" == "pseudoxml" (
	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end ) :end ipykernel-6.7.0/docs/requirements.txt000066400000000000000000000000451417004153500177410ustar00rootroot00000000000000sphinxcontrib_github_alt myst_parser ipykernel-6.7.0/examples/000077500000000000000000000000001417004153500153445ustar00rootroot00000000000000ipykernel-6.7.0/examples/embedding/000077500000000000000000000000001417004153500172625ustar00rootroot00000000000000ipykernel-6.7.0/examples/embedding/inprocess_qtconsole.py000066400000000000000000000045731417004153500237410ustar00rootroot00000000000000import os import sys import tornado from qtconsole.rich_ipython_widget import RichIPythonWidget from qtconsole.inprocess import QtInProcessKernelManager from IPython.lib import guisupport def print_process_id(): print('Process ID is:', os.getpid()) def init_asyncio_patch(): """set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if sys.platform.startswith("win") and sys.version_info >= (3, 8) and tornado.version_info < (6, 1): import asyncio try: from asyncio import ( WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) def main(): # Print the ID of the main process print_process_id() init_asyncio_patch() app = guisupport.get_app_qt4() # Create an in-process kernel # >>> print_process_id() # will print the same process ID as the main process kernel_manager = QtInProcessKernelManager() kernel_manager.start_kernel() kernel = kernel_manager.kernel kernel.gui = 'qt4' kernel.shell.push({'foo': 43, 'print_process_id': print_process_id}) kernel_client = kernel_manager.client() kernel_client.start_channels() def stop(): kernel_client.stop_channels() kernel_manager.shutdown_kernel() app.exit() control = RichIPythonWidget() control.kernel_manager = kernel_manager control.kernel_client = kernel_client control.exit_requested.connect(stop) control.show() guisupport.start_event_loop_qt4(app) if __name__ == '__main__': main() ipykernel-6.7.0/examples/embedding/inprocess_terminal.py000066400000000000000000000040221417004153500235320ustar00rootroot00000000000000import os import sys import tornado from ipykernel.inprocess import InProcessKernelManager from jupyter_console.ptshell import ZMQTerminalInteractiveShell def print_process_id(): print('Process ID is:', os.getpid()) def init_asyncio_patch(): """set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. 
do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if sys.platform.startswith("win") and sys.version_info >= (3, 8) and tornado.version_info < (6, 1): import asyncio try: from asyncio import ( WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) def main(): print_process_id() # Create an in-process kernel # >>> print_process_id() # will print the same process ID as the main process init_asyncio_patch() kernel_manager = InProcessKernelManager() kernel_manager.start_kernel() kernel = kernel_manager.kernel kernel.gui = 'qt4' kernel.shell.push({'foo': 43, 'print_process_id': print_process_id}) client = kernel_manager.client() client.start_channels() shell = ZMQTerminalInteractiveShell(manager=kernel_manager, client=client) shell.mainloop() if __name__ == '__main__': main() ipykernel-6.7.0/examples/embedding/internal_ipkernel.py000066400000000000000000000037251417004153500233500ustar00rootroot00000000000000#----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import sys from IPython.lib.kernel import connect_qtconsole from ipykernel.kernelapp import IPKernelApp #----------------------------------------------------------------------------- # Functions and classes #----------------------------------------------------------------------------- def mpl_kernel(gui): """Launch and return an IPython kernel with matplotlib support for the desired gui """ kernel = IPKernelApp.instance() kernel.initialize(['python', '--matplotlib=%s' % gui, #'--log-level=10' ]) return kernel class InternalIPKernel: def init_ipkernel(self, backend): # Start IPython kernel with GUI event loop and mpl support self.ipkernel = mpl_kernel(backend) # To create and track active qt consoles self.consoles = [] # This application will also act on the shell user namespace self.namespace = self.ipkernel.shell.user_ns # Example: a variable that will be seen by the user in the shell, and # that the GUI modifies (the 'Counter++' button increments it): self.namespace['app_counter'] = 0 #self.namespace['ipkernel'] = self.ipkernel # dbg def print_namespace(self, evt=None): print("\n***Variables in User namespace***") for k, v in self.namespace.items(): if not k.startswith('_'): print('%s -> %r' % (k, v)) sys.stdout.flush() def new_qt_console(self, evt=None): """start a new qtconsole connected to our kernel""" return connect_qtconsole(self.ipkernel.abs_connection_file, profile=self.ipkernel.profile) def count(self, evt=None): self.namespace['app_counter'] += 1 def cleanup_consoles(self, evt=None): for c in self.consoles: c.kill() ipykernel-6.7.0/examples/embedding/ipkernel_qtapp.py000077500000000000000000000054061417004153500226620ustar00rootroot00000000000000#!/usr/bin/env python """Example integrating an IPython kernel into a GUI App. This trivial GUI application internally starts an IPython kernel, to which Qt consoles can be connected either by the user at the command line or started from the GUI itself, via a button. 
The GUI can also manipulate one variable in the kernel's namespace, and print the namespace to the console. Play with it by running the script and then opening one or more consoles, and pushing the 'Counter++' and 'Namespace' buttons. Upon exit, it should automatically close all consoles opened from the GUI. Consoles attached separately from a terminal will not be terminated, though they will notice that their kernel died. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from PyQt4 import Qt from internal_ipkernel import InternalIPKernel #----------------------------------------------------------------------------- # Functions and classes #----------------------------------------------------------------------------- class SimpleWindow(Qt.QWidget, InternalIPKernel): def __init__(self, app): Qt.QWidget.__init__(self) self.app = app self.add_widgets() self.init_ipkernel('qt') def add_widgets(self): self.setGeometry(300, 300, 400, 70) self.setWindowTitle('IPython in your app') # Add simple buttons: console = Qt.QPushButton('Qt Console', self) console.setGeometry(10, 10, 100, 35) self.connect(console, Qt.SIGNAL('clicked()'), self.new_qt_console) namespace = Qt.QPushButton('Namespace', self) namespace.setGeometry(120, 10, 100, 35) self.connect(namespace, Qt.SIGNAL('clicked()'), self.print_namespace) count = Qt.QPushButton('Count++', self) count.setGeometry(230, 10, 80, 35) self.connect(count, Qt.SIGNAL('clicked()'), self.count) # Quit and cleanup quit = Qt.QPushButton('Quit', self) quit.setGeometry(320, 10, 60, 35) self.connect(quit, Qt.SIGNAL('clicked()'), Qt.qApp, Qt.SLOT('quit()')) self.app.connect(self.app, Qt.SIGNAL("lastWindowClosed()"), self.app, Qt.SLOT("quit()")) self.app.aboutToQuit.connect(self.cleanup_consoles) #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- if __name__ == "__main__": app = Qt.QApplication([]) # Create our window win = SimpleWindow(app) win.show() # Very important, IPython-specific step: this gets GUI event loop # integration going, and it replaces calling app.exec_() win.ipkernel.start() ipykernel-6.7.0/examples/embedding/ipkernel_wxapp.py000077500000000000000000000103241417004153500226670ustar00rootroot00000000000000#!/usr/bin/env python """Example integrating an IPython kernel into a GUI App. This trivial GUI application internally starts an IPython kernel, to which Qt consoles can be connected either by the user at the command line or started from the GUI itself, via a button. The GUI can also manipulate one variable in the kernel's namespace, and print the namespace to the console. Play with it by running the script and then opening one or more consoles, and pushing the 'Counter++' and 'Namespace' buttons. Upon exit, it should automatically close all consoles opened from the GUI. Consoles attached separately from a terminal will not be terminated, though they will notice that their kernel died. 
Ref: Modified from wxPython source code wxPython/samples/simple/simple.py """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import sys import wx from internal_ipkernel import InternalIPKernel #----------------------------------------------------------------------------- # Functions and classes #----------------------------------------------------------------------------- class MyFrame(wx.Frame, InternalIPKernel): """ This is MyFrame. It just shows a few controls on a wxPanel, and has a simple menu. """ def __init__(self, parent, title): wx.Frame.__init__(self, parent, -1, title, pos=(150, 150), size=(350, 285)) # Create the menubar menuBar = wx.MenuBar() # and a menu menu = wx.Menu() # add an item to the menu, using \tKeyName automatically # creates an accelerator, the third param is some help text # that will show up in the statusbar menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit this simple sample") # bind the menu event to an event handler self.Bind(wx.EVT_MENU, self.OnTimeToClose, id=wx.ID_EXIT) # and put the menu on the menubar menuBar.Append(menu, "&File") self.SetMenuBar(menuBar) self.CreateStatusBar() # Now create the Panel to put the other controls on. panel = wx.Panel(self) # and a few controls text = wx.StaticText(panel, -1, "Hello World!") text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD)) text.SetSize(text.GetBestSize()) qtconsole_btn = wx.Button(panel, -1, "Qt Console") ns_btn = wx.Button(panel, -1, "Namespace") count_btn = wx.Button(panel, -1, "Count++") close_btn = wx.Button(panel, -1, "Quit") # bind the button events to handlers self.Bind(wx.EVT_BUTTON, self.new_qt_console, qtconsole_btn) self.Bind(wx.EVT_BUTTON, self.print_namespace, ns_btn) self.Bind(wx.EVT_BUTTON, self.count, count_btn) self.Bind(wx.EVT_BUTTON, self.OnTimeToClose, close_btn) # Use a sizer to layout the controls, stacked vertically and with # a 10 pixel border around each sizer = wx.BoxSizer(wx.VERTICAL) for ctrl in [text, qtconsole_btn, ns_btn, count_btn, close_btn]: sizer.Add(ctrl, 0, wx.ALL, 10) panel.SetSizer(sizer) panel.Layout() # Start the IPython kernel with gui support self.init_ipkernel('wx') def OnTimeToClose(self, evt): """Event handler for the button click.""" print("See ya later!") sys.stdout.flush() self.cleanup_consoles(evt) self.Close() # Not sure why, but our IPython kernel seems to prevent normal WX # shutdown, so an explicit exit() call is needed. 
sys.exit() class MyApp(wx.App): def OnInit(self): frame = MyFrame(None, "Simple wxPython App") self.SetTopWindow(frame) frame.Show(True) self.ipkernel = frame.ipkernel return True #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- if __name__ == '__main__': app = MyApp(redirect=False, clearSigInt=False) # Very important, IPython-specific step: this gets GUI event loop # integration going, and it replaces calling app.MainLoop() app.ipkernel.start() ipykernel-6.7.0/ipykernel/000077500000000000000000000000001417004153500155305ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/__init__.py000066400000000000000000000001751417004153500176440ustar00rootroot00000000000000from ._version import version_info, __version__, kernel_protocol_version_info, kernel_protocol_version from .connect import *ipykernel-6.7.0/ipykernel/__main__.py000066400000000000000000000001441417004153500176210ustar00rootroot00000000000000if __name__ == '__main__': from ipykernel import kernelapp as app app.launch_new_instance() ipykernel-6.7.0/ipykernel/_eventloop_macos.py000066400000000000000000000077171417004153500214520ustar00rootroot00000000000000"""Eventloop hook for OS X Calls NSApp / CoreFoundation APIs via ctypes. """ # cribbed heavily from IPython.terminal.pt_inputhooks.osx # obj-c boilerplate from appnope, used under BSD 2-clause import ctypes import ctypes.util from threading import Event objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc')) void_p = ctypes.c_void_p objc.objc_getClass.restype = void_p objc.sel_registerName.restype = void_p objc.objc_msgSend.restype = void_p objc.objc_msgSend.argtypes = [void_p, void_p] msg = objc.objc_msgSend def _utf8(s): """ensure utf8 bytes""" if not isinstance(s, bytes): s = s.encode('utf8') return s def n(name): """create a selector name (for ObjC methods)""" return objc.sel_registerName(_utf8(name)) def C(classname): """get an ObjC Class by name""" return objc.objc_getClass(_utf8(classname)) # end obj-c boilerplate from appnope # CoreFoundation C-API calls we will use: CoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation')) CFAbsoluteTimeGetCurrent = CoreFoundation.CFAbsoluteTimeGetCurrent CFAbsoluteTimeGetCurrent.restype = ctypes.c_double CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent CFRunLoopGetCurrent.restype = void_p CFRunLoopGetMain = CoreFoundation.CFRunLoopGetMain CFRunLoopGetMain.restype = void_p CFRunLoopStop = CoreFoundation.CFRunLoopStop CFRunLoopStop.restype = None CFRunLoopStop.argtypes = [void_p] CFRunLoopTimerCreate = CoreFoundation.CFRunLoopTimerCreate CFRunLoopTimerCreate.restype = void_p CFRunLoopTimerCreate.argtypes = [ void_p, # allocator (NULL) ctypes.c_double, # fireDate ctypes.c_double, # interval ctypes.c_int, # flags (0) ctypes.c_int, # order (0) void_p, # callout void_p, # context ] CFRunLoopAddTimer = CoreFoundation.CFRunLoopAddTimer CFRunLoopAddTimer.restype = None CFRunLoopAddTimer.argtypes = [ void_p, void_p, void_p ] kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes') def _NSApp(): """Return the global NSApplication instance (NSApp)""" return msg(C('NSApplication'), n('sharedApplication')) def _wake(NSApp): """Wake the Application""" event = msg(C('NSEvent'), n('otherEventWithType:location:modifierFlags:' 'timestamp:windowNumber:context:subtype:data1:data2:'), 15, # Type 0, # location 0, # flags 0, # timestamp 0, # window None, # context 0, # 
subtype 0, # data1 0, # data2 )
    msg(NSApp, n('postEvent:atStart:'), void_p(event), True)


_triggered = Event()

def stop(timer=None, loop=None):
    """Callback to fire when there's input to be read"""
    _triggered.set()
    NSApp = _NSApp()
    # if NSApp is not running, stop CFRunLoop directly,
    # otherwise stop and wake NSApp
    if msg(NSApp, n('isRunning')):
        msg(NSApp, n('stop:'), NSApp)
        _wake(NSApp)
    else:
        CFRunLoopStop(CFRunLoopGetCurrent())

_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p)
_c_stop_callback = _c_callback_func_type(stop)

def _stop_after(delay):
    """Register callback to stop eventloop after a delay"""
    timer = CFRunLoopTimerCreate(
        None,  # allocator
        CFAbsoluteTimeGetCurrent() + delay,  # fireDate
        0,  # interval
        0,  # flags
        0,  # order
        _c_stop_callback,
        None,
    )
    CFRunLoopAddTimer(
        CFRunLoopGetMain(),
        timer,
        kCFRunLoopCommonModes,
    )

def mainloop(duration=1):
    """run the Cocoa eventloop for the specified duration (seconds)"""
    _triggered.clear()
    NSApp = _NSApp()
    _stop_after(duration)
    msg(NSApp, n('run'))
    if not _triggered.is_set():
        # app closed without firing callback,
        # probably due to last window being closed.
        # Run the loop manually in this case,
        # since there may be events still to process (ipython/ipython#9734)
        CoreFoundation.CFRunLoopRun()
ipykernel-6.7.0/ipykernel/_version.py000066400000000000000000000010611417004153500177240ustar00rootroot00000000000000"""
store the current version info of the server.
"""
import re

# Version string must appear intact for tbump versioning
__version__ = '6.7.0'

# Build up version_info tuple for backwards compatibility
pattern = r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<rest>.*)'
match = re.match(pattern, __version__)
parts = [int(match[part]) for part in ['major', 'minor', 'patch']]
if match['rest']:
    parts.append(match['rest'])
version_info = tuple(parts)

kernel_protocol_version_info = (5, 3)
kernel_protocol_version = '%s.%s' % kernel_protocol_version_info
ipykernel-6.7.0/ipykernel/comm/000077500000000000000000000000001417004153500164635ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/comm/__init__.py000066400000000000000000000000531417004153500205720ustar00rootroot00000000000000from .manager import *
from .comm import *
ipykernel-6.7.0/ipykernel/comm/comm.py000066400000000000000000000125551417004153500177760ustar00rootroot00000000000000"""Base class for a Comm"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
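# A minimal usage sketch (an added note, not part of the original module):
# inside a running ipykernel kernel, a comm to the frontend can be opened
# roughly like this -- the target name "example_target" and all payloads
# below are illustrative only:
#
#     from ipykernel.comm import Comm
#     comm = Comm(target_name="example_target", data={"state": "ready"})
#     comm.on_msg(lambda msg: comm.send({"echo": msg["content"]["data"]}))
#     comm.send({"progress": 0.5})
#     comm.close()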
import uuid from traitlets.config import LoggingConfigurable from ipykernel.kernelbase import Kernel from ipykernel.jsonutil import json_clean from traitlets import Instance, Unicode, Bytes, Bool, Dict, Any, default class Comm(LoggingConfigurable): """Class for communicating between a Frontend and a Kernel""" kernel = Instance('ipykernel.kernelbase.Kernel', allow_none=True) @default('kernel') def _default_kernel(self): if Kernel.initialized(): return Kernel.instance() comm_id = Unicode() @default('comm_id') def _default_comm_id(self): return uuid.uuid4().hex primary = Bool(True, help="Am I the primary or secondary Comm?") target_name = Unicode('comm') target_module = Unicode(None, allow_none=True, help="""requirejs module from which to load comm target.""") topic = Bytes() @default('topic') def _default_topic(self): return ('comm-%s' % self.comm_id).encode('ascii') _open_data = Dict(help="data dict, if any, to be included in comm_open") _close_data = Dict(help="data dict, if any, to be included in comm_close") _msg_callback = Any() _close_callback = Any() _closed = Bool(True) def __init__(self, target_name='', data=None, metadata=None, buffers=None, **kwargs): if target_name: kwargs['target_name'] = target_name super().__init__(**kwargs) if self.kernel: if self.primary: # I am primary, open my peer. self.open(data=data, metadata=metadata, buffers=buffers) else: self._closed = False def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys): """Helper for sending a comm message on IOPub""" data = {} if data is None else data metadata = {} if metadata is None else metadata content = json_clean(dict(data=data, comm_id=self.comm_id, **keys)) self.kernel.session.send(self.kernel.iopub_socket, msg_type, content, metadata=json_clean(metadata), parent=self.kernel.get_parent("shell"), ident=self.topic, buffers=buffers, ) def __del__(self): """trigger close on gc""" self.close(deleting=True) # publishing messages def open(self, data=None, metadata=None, buffers=None): """Open the frontend-side version of this comm""" if data is None: data = self._open_data comm_manager = getattr(self.kernel, 'comm_manager', None) if comm_manager is None: raise RuntimeError("Comms cannot be opened without a kernel " "and a comm_manager attached to that kernel.") comm_manager.register_comm(self) try: self._publish_msg('comm_open', data=data, metadata=metadata, buffers=buffers, target_name=self.target_name, target_module=self.target_module, ) self._closed = False except Exception: comm_manager.unregister_comm(self) raise def close(self, data=None, metadata=None, buffers=None, deleting=False): """Close the frontend-side version of this comm""" if self._closed: # only close once return self._closed = True # nothing to send if we have no kernel # can be None during interpreter cleanup if not self.kernel: return if data is None: data = self._close_data self._publish_msg('comm_close', data=data, metadata=metadata, buffers=buffers, ) if not deleting: # If deleting, the comm can't be registered self.kernel.comm_manager.unregister_comm(self) def send(self, data=None, metadata=None, buffers=None): """Send a message to the frontend-side version of this comm""" self._publish_msg('comm_msg', data=data, metadata=metadata, buffers=buffers, ) # registering callbacks def on_close(self, callback): """Register a callback for comm_close Will be called with the `data` of the close message. Call `on_close(None)` to disable an existing callback. 
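        Example (a sketch; the callback body is illustrative)::

            comm.on_close(lambda msg: print("comm closed:", msg["content"]["comm_id"]))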
""" self._close_callback = callback def on_msg(self, callback): """Register a callback for comm_msg Will be called with the `data` of any comm_msg messages. Call `on_msg(None)` to disable an existing callback. """ self._msg_callback = callback # handling of incoming messages def handle_close(self, msg): """Handle a comm_close message""" self.log.debug("handle_close[%s](%s)", self.comm_id, msg) if self._close_callback: self._close_callback(msg) def handle_msg(self, msg): """Handle a comm_msg message""" self.log.debug("handle_msg[%s](%s)", self.comm_id, msg) if self._msg_callback: shell = self.kernel.shell if shell: shell.events.trigger('pre_execute') self._msg_callback(msg) if shell: shell.events.trigger('post_execute') __all__ = ['Comm'] ipykernel-6.7.0/ipykernel/comm/manager.py000066400000000000000000000076541417004153500204630ustar00rootroot00000000000000"""Base class to manage comms""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import logging from traitlets.config import LoggingConfigurable from traitlets.utils.importstring import import_item from traitlets import Instance, Dict from .comm import Comm class CommManager(LoggingConfigurable): """Manager for Comms in the Kernel""" kernel = Instance('ipykernel.kernelbase.Kernel') comms = Dict() targets = Dict() # Public APIs def register_target(self, target_name, f): """Register a callable f for a given target name f will be called with two arguments when a comm_open message is received with `target`: - the Comm instance - the `comm_open` message itself. f can be a Python callable or an import string for one. """ if isinstance(f, str): f = import_item(f) self.targets[target_name] = f def unregister_target(self, target_name, f): """Unregister a callable registered with register_target""" return self.targets.pop(target_name) def register_comm(self, comm): """Register a new comm""" comm_id = comm.comm_id comm.kernel = self.kernel self.comms[comm_id] = comm return comm_id def unregister_comm(self, comm): """Unregister a comm, and close its counterpart""" # unlike get_comm, this should raise a KeyError comm = self.comms.pop(comm.comm_id) def get_comm(self, comm_id): """Get a comm with a particular id Returns the comm if found, otherwise None. This will not raise an error, it will log messages if the comm cannot be found. """ try: return self.comms[comm_id] except KeyError: self.log.warning("No such comm: %s", comm_id) if self.log.isEnabledFor(logging.DEBUG): # don't create the list of keys if debug messages aren't enabled self.log.debug("Current comms: %s", list(self.comms.keys())) # Message handlers def comm_open(self, stream, ident, msg): """Handler for comm_open messages""" content = msg['content'] comm_id = content['comm_id'] target_name = content['target_name'] f = self.targets.get(target_name, None) comm = Comm(comm_id=comm_id, primary=False, target_name=target_name, ) self.register_comm(comm) if f is None: self.log.error("No such comm target registered: %s", target_name) else: try: f(comm, msg) return except Exception: self.log.error("Exception opening comm with target: %s", target_name, exc_info=True) # Failure. try: comm.close() except Exception: self.log.error("""Could not close comm during `comm_open` failure clean-up. 
The comm may not have been opened yet.""", exc_info=True) def comm_msg(self, stream, ident, msg): """Handler for comm_msg messages""" content = msg['content'] comm_id = content['comm_id'] comm = self.get_comm(comm_id) if comm is None: return try: comm.handle_msg(msg) except Exception: self.log.error('Exception in comm_msg for %s', comm_id, exc_info=True) def comm_close(self, stream, ident, msg): """Handler for comm_close messages""" content = msg['content'] comm_id = content['comm_id'] comm = self.get_comm(comm_id) if comm is None: return self.comms[comm_id]._closed = True del self.comms[comm_id] try: comm.handle_close(msg) except Exception: self.log.error('Exception in comm_close for %s', comm_id, exc_info=True) __all__ = ['CommManager'] ipykernel-6.7.0/ipykernel/compiler.py000066400000000000000000000047721417004153500177260ustar00rootroot00000000000000from IPython.core.compilerop import CachingCompiler import tempfile import os import sys def murmur2_x86(data, seed): m = 0x5bd1e995 data = [chr(d) for d in str.encode(data, "utf8")] length = len(data) h = seed ^ length rounded_end = (length & 0xfffffffc) for i in range(0, rounded_end, 4): k = (ord(data[i]) & 0xff) | ((ord(data[i + 1]) & 0xff) << 8) | \ ((ord(data[i + 2]) & 0xff) << 16) | (ord(data[i + 3]) << 24) k = (k * m) & 0xffffffff k ^= k >> 24 k = (k * m) & 0xffffffff h = (h * m) & 0xffffffff h ^= k val = length & 0x03 k = 0 if val == 3: k = (ord(data[rounded_end + 2]) & 0xff) << 16 if val in [2, 3]: k |= (ord(data[rounded_end + 1]) & 0xff) << 8 if val in [1, 2, 3]: k |= ord(data[rounded_end]) & 0xff h ^= k h = (h * m) & 0xffffffff h ^= h >> 13 h = (h * m) & 0xffffffff h ^= h >> 15 return h convert_to_long_pathname = lambda filename:filename if sys.platform == 'win32': try: import ctypes from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW _GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD] _GetLongPathName.restype = DWORD def _convert_to_long_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) rv = _GetLongPathName(filename, buf, MAX_PATH) if rv != 0 and rv <= MAX_PATH: filename = buf.value return filename # test that it works so if there are any issues we fail just once here _convert_to_long_pathname(__file__) except: pass else: convert_to_long_pathname = _convert_to_long_pathname def get_tmp_directory(): tmp_dir = convert_to_long_pathname(tempfile.gettempdir()) pid = os.getpid() return tmp_dir + os.sep + 'ipykernel_' + str(pid) def get_tmp_hash_seed(): hash_seed = 0xc70f6907 return hash_seed def get_file_name(code): cell_name = os.environ.get("IPYKERNEL_CELL_NAME") if cell_name is None: name = murmur2_x86(code, get_tmp_hash_seed()) cell_name = get_tmp_directory() + os.sep + str(name) + '.py' return cell_name class XCachingCompiler(CachingCompiler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.log = None def get_code_name(self, raw_code, code, number): return get_file_name(raw_code) ipykernel-6.7.0/ipykernel/connect.py000066400000000000000000000072401417004153500175360ustar00rootroot00000000000000"""Connection file-related utilities for the kernel """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
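# Usage sketch (an added note, not part of the original module; it assumes the
# code runs inside an IPython kernel, so the current kernel's connection file
# can be discovered automatically):
#
#     from ipykernel.connect import get_connection_info
#     info = get_connection_info(unpack=True)
#     print(info["ip"], info["shell_port"], info["key"])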
import json import sys from subprocess import Popen, PIPE import jupyter_client from jupyter_client import write_connection_file def get_connection_file(app=None): """Return the path to the connection file of an app Parameters ---------- app : IPKernelApp instance [optional] If unspecified, the currently running app will be used """ from traitlets.utils import filefind if app is None: from ipykernel.kernelapp import IPKernelApp if not IPKernelApp.initialized(): raise RuntimeError("app not specified, and not in a running Kernel") app = IPKernelApp.instance() return filefind(app.connection_file, ['.', app.connection_dir]) def _find_connection_file(connection_file): """Return the absolute path for a connection file - If nothing specified, return current Kernel's connection file - Otherwise, call jupyter_client.find_connection_file """ if connection_file is None: # get connection file from current kernel return get_connection_file() else: return jupyter_client.find_connection_file(connection_file) def get_connection_info(connection_file=None, unpack=False): """Return the connection information for the current Kernel. Parameters ---------- connection_file : str [optional] The connection file to be used. Can be given by absolute path, or IPython will search in the security directory. If unspecified, the connection file for the currently running IPython Kernel will be used, which is only allowed from inside a kernel. unpack : bool [default: False] if True, return the unpacked dict, otherwise just the string contents of the file. Returns ------- The connection dictionary of the current kernel, as string or dict, depending on `unpack`. """ cf = _find_connection_file(connection_file) with open(cf) as f: info = f.read() if unpack: info = json.loads(info) # ensure key is bytes: info["key"] = info.get("key", "").encode() return info def connect_qtconsole(connection_file=None, argv=None): """Connect a qtconsole to the current kernel. This is useful for connecting a second qtconsole to a kernel, or to a local notebook. Parameters ---------- connection_file : str [optional] The connection file to be used. Can be given by absolute path, or IPython will search in the security directory. If unspecified, the connection file for the currently running IPython Kernel will be used, which is only allowed from inside a kernel. argv : list [optional] Any extra args to be passed to the console. Returns ------- :class:`subprocess.Popen` instance running the qtconsole frontend """ argv = [] if argv is None else argv cf = _find_connection_file(connection_file) cmd = ';'.join([ "from IPython.qt.console import qtconsoleapp", "qtconsoleapp.main()" ]) kwargs = {} # Launch the Qt console in a separate session & process group, so # interrupting the kernel doesn't kill it.
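# (On POSIX systems, ``start_new_session=True`` makes the child process call # setsid() before exec, so a SIGINT delivered to the kernel's process group # never reaches the console process.)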
kwargs['start_new_session'] = True return Popen([sys.executable, '-c', cmd, '--existing', cf] + argv, stdout=PIPE, stderr=PIPE, close_fds=(sys.platform != 'win32'), **kwargs ) __all__ = [ 'write_connection_file', 'get_connection_file', 'get_connection_info', 'connect_qtconsole', ] ipykernel-6.7.0/ipykernel/control.py000066400000000000000000000013441417004153500175640ustar00rootroot00000000000000from threading import Thread import zmq if zmq.pyzmq_version_info() >= (17, 0): from tornado.ioloop import IOLoop else: # deprecated since pyzmq 17 from zmq.eventloop.ioloop import IOLoop class ControlThread(Thread): def __init__(self, **kwargs): Thread.__init__(self, **kwargs) self.io_loop = IOLoop(make_current=False) self.pydev_do_not_trace = True self.is_pydev_daemon_thread = True def run(self): self.io_loop.make_current() try: self.io_loop.start() finally: self.io_loop.close() def stop(self): """Stop the thread. This method is threadsafe. """ self.io_loop.add_callback(self.io_loop.stop) ipykernel-6.7.0/ipykernel/datapub.py000066400000000000000000000041521417004153500175240ustar00rootroot00000000000000"""Publishing native (typically pickled) objects. """ import warnings warnings.warn("ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub", DeprecationWarning, stacklevel=2 ) # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from traitlets.config import Configurable from traitlets import Instance, Dict, CBytes, Any from ipykernel.jsonutil import json_clean try: # available since ipyparallel 5.0.0 from ipyparallel.serialize import serialize_object except ImportError: # Deprecated since ipykernel 4.3.0 from ipykernel.serialize import serialize_object from jupyter_client.session import Session, extract_header class ZMQDataPublisher(Configurable): topic = CBytes(b'datapub') session = Instance(Session, allow_none=True) pub_socket = Any(allow_none=True) parent_header = Dict({}) def set_parent(self, parent): """Set the parent for outbound messages.""" self.parent_header = extract_header(parent) def publish_data(self, data): """publish a data_message on the IOPub channel Parameters ---------- data : dict The data to be published. Think of it as a namespace. """ session = self.session buffers = serialize_object(data, buffer_threshold=session.buffer_threshold, item_threshold=session.item_threshold, ) content = json_clean(dict(keys=list(data.keys()))) session.send(self.pub_socket, 'data_message', content=content, parent=self.parent_header, buffers=buffers, ident=self.topic, ) def publish_data(data): """publish a data_message on the IOPub channel Parameters ---------- data : dict The data to be published. Think of it as a namespace. """ warnings.warn("ipykernel.datapub is deprecated.
It has moved to ipyparallel.datapub", DeprecationWarning, stacklevel=2 ) from ipykernel.zmqshell import ZMQInteractiveShell ZMQInteractiveShell.instance().data_pub.publish_data(data) ipykernel-6.7.0/ipykernel/debugger.py000066400000000000000000000532251417004153500176750ustar00rootroot00000000000000import os import re import zmq from zmq.utils import jsonapi from tornado.queues import Queue from tornado.locks import Event from IPython.core.getipython import get_ipython from IPython.core.inputtransformer2 import leading_empty_lines try: from jupyter_client.jsonutil import json_default except ImportError: from jupyter_client.jsonutil import date_default as json_default from .compiler import (get_file_name, get_tmp_directory, get_tmp_hash_seed) # This import is required to have the next ones working... from debugpy.server import api # noqa from _pydevd_bundle import pydevd_frame_utils from _pydevd_bundle.pydevd_suspended_frames import SuspendedFramesManager, _FramesTracker # Required for backwards compatiblity ROUTING_ID = getattr(zmq, 'ROUTING_ID', None) or zmq.IDENTITY class _FakeCode: def __init__(self, co_filename, co_name): self.co_filename = co_filename self.co_name = co_name class _FakeFrame: def __init__(self, f_code, f_globals, f_locals): self.f_code = f_code self.f_globals = f_globals self.f_locals = f_locals self.f_back = None class _DummyPyDB: def __init__(self): from _pydevd_bundle.pydevd_api import PyDevdAPI self.variable_presentation = PyDevdAPI.VariablePresentation() class VariableExplorer: def __init__(self): self.suspended_frame_manager = SuspendedFramesManager() self.py_db = _DummyPyDB() self.tracker = _FramesTracker(self.suspended_frame_manager, self.py_db) self.frame = None def track(self): var = get_ipython().user_ns self.frame = _FakeFrame(_FakeCode('', get_file_name('sys._getframe()')), var, var) self.tracker.track('thread1', pydevd_frame_utils.create_frames_list_from_frame(self.frame)) def untrack_all(self): self.tracker.untrack_all() def get_children_variables(self, variable_ref = None): var_ref = variable_ref if not var_ref: var_ref = id(self.frame) variables = self.suspended_frame_manager.get_variable(var_ref) return [x.get_var_data() for x in variables.get_children_variables()] class DebugpyMessageQueue: HEADER = 'Content-Length: ' HEADER_LENGTH = 16 SEPARATOR = '\r\n\r\n' SEPARATOR_LENGTH = 4 def __init__(self, event_callback, log): self.tcp_buffer = '' self._reset_tcp_pos() self.event_callback = event_callback self.message_queue = Queue() self.log = log def _reset_tcp_pos(self): self.header_pos = -1 self.separator_pos = -1 self.message_size = 0 self.message_pos = -1 def _put_message(self, raw_msg): self.log.debug('QUEUE - _put_message:') msg = jsonapi.loads(raw_msg) if msg['type'] == 'event': self.log.debug('QUEUE - received event:') self.log.debug(msg) self.event_callback(msg) else: self.log.debug('QUEUE - put message:') self.log.debug(msg) self.message_queue.put_nowait(msg) def put_tcp_frame(self, frame): self.tcp_buffer += frame self.log.debug('QUEUE - received frame') while True: # Finds header if self.header_pos == -1: self.header_pos = self.tcp_buffer.find(DebugpyMessageQueue.HEADER) if self.header_pos == -1: return self.log.debug('QUEUE - found header at pos %i', self.header_pos) #Finds separator if self.separator_pos == -1: hint = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH self.separator_pos = self.tcp_buffer.find(DebugpyMessageQueue.SEPARATOR, hint) if self.separator_pos == -1: return self.log.debug('QUEUE - found separator at pos %i', 
self.separator_pos) if self.message_pos == -1: size_pos = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH self.message_pos = self.separator_pos + DebugpyMessageQueue.SEPARATOR_LENGTH self.message_size = int(self.tcp_buffer[size_pos:self.separator_pos]) self.log.debug('QUEUE - found message at pos %i', self.message_pos) self.log.debug('QUEUE - message size is %i', self.message_size) if len(self.tcp_buffer) - self.message_pos < self.message_size: return self._put_message(self.tcp_buffer[self.message_pos:self.message_pos + self.message_size]) if len(self.tcp_buffer) - self.message_pos == self.message_size: self.log.debug('QUEUE - resetting tcp_buffer') self.tcp_buffer = '' self._reset_tcp_pos() return else: self.tcp_buffer = self.tcp_buffer[self.message_pos + self.message_size:] self.log.debug('QUEUE - slicing tcp_buffer: %s', self.tcp_buffer) self._reset_tcp_pos() async def get_message(self): return await self.message_queue.get() class DebugpyClient: def __init__(self, log, debugpy_stream, event_callback): self.log = log self.debugpy_stream = debugpy_stream self.event_callback = event_callback self.message_queue = DebugpyMessageQueue(self._forward_event, self.log) self.debugpy_host = '127.0.0.1' self.debugpy_port = -1 self.routing_id = None self.wait_for_attach = True self.init_event = Event() self.init_event_seq = -1 def _get_endpoint(self): host, port = self.get_host_port() return 'tcp://' + host + ':' + str(port) def _forward_event(self, msg): if msg['event'] == 'initialized': self.init_event.set() self.init_event_seq = msg['seq'] self.event_callback(msg) def _send_request(self, msg): if self.routing_id is None: self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID) content = jsonapi.dumps( msg, default=json_default, ensure_ascii=False, allow_nan=False, ) content_length = str(len(content)) buf = (DebugpyMessageQueue.HEADER + content_length + DebugpyMessageQueue.SEPARATOR).encode('ascii') buf += content self.log.debug("DEBUGPYCLIENT:") self.log.debug(self.routing_id) self.log.debug(buf) self.debugpy_stream.send_multipart((self.routing_id, buf)) async def _wait_for_response(self): # Since events are never pushed to the message_queue # we can safely assume the next message in queue # will be an answer to the previous request return await self.message_queue.get_message() async def _handle_init_sequence(self): # 1] Waits for initialized event await self.init_event.wait() # 2] Sends configurationDone request configurationDone = { 'type': 'request', 'seq': int(self.init_event_seq) + 1, 'command': 'configurationDone' } self._send_request(configurationDone) # 3] Waits for configurationDone response await self._wait_for_response() # 4] Waits for attachResponse and returns it attach_rep = await self._wait_for_response() return attach_rep def get_host_port(self): if self.debugpy_port == -1: socket = self.debugpy_stream.socket socket.bind_to_random_port('tcp://' + self.debugpy_host) self.endpoint = socket.getsockopt(zmq.LAST_ENDPOINT).decode('utf-8') socket.unbind(self.endpoint) index = self.endpoint.rfind(':') self.debugpy_port = self.endpoint[index+1:] return self.debugpy_host, self.debugpy_port def connect_tcp_socket(self): self.debugpy_stream.socket.connect(self._get_endpoint()) self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID) def disconnect_tcp_socket(self): self.debugpy_stream.socket.disconnect(self._get_endpoint()) self.routing_id = None self.init_event = Event() self.init_event_seq = -1 self.wait_for_attach = True def receive_dap_frame(self, frame): 
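# Each incoming frame uses the DAP wire format parsed by
# DebugpyMessageQueue above -- a Content-Length header, a blank line,
# then a JSON body. For reference (a sketch; the seq numbers and body
# shown are hypothetical):
#
#     Content-Length: 91\r\n\r\n
#     {"type": "response", "seq": 2, "request_seq": 1, "success": true, ...}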
self.message_queue.put_tcp_frame(frame) async def send_dap_request(self, msg): self._send_request(msg) if self.wait_for_attach and msg['command'] == 'attach': rep = await self._handle_init_sequence() self.wait_for_attach = False return rep else: rep = await self._wait_for_response() self.log.debug('DEBUGPYCLIENT - returning:') self.log.debug(rep) return rep class Debugger: # Requests that require that the debugger has started started_debug_msg_types = [ 'dumpCell', 'setBreakpoints', 'source', 'stackTrace', 'variables', 'attach', 'configurationDone' ] # Requests that can be handled even if the debugger is not running static_debug_msg_types = [ 'debugInfo', 'inspectVariables', 'richInspectVariables' ] def __init__(self, log, debugpy_stream, event_callback, shell_socket, session): self.log = log self.debugpy_client = DebugpyClient(log, debugpy_stream, self._handle_event) self.shell_socket = shell_socket self.session = session self.is_started = False self.event_callback = event_callback self.started_debug_handlers = {} for msg_type in Debugger.started_debug_msg_types: self.started_debug_handlers[msg_type] = getattr(self, msg_type) self.static_debug_handlers = {} for msg_type in Debugger.static_debug_msg_types: self.static_debug_handlers[msg_type] = getattr(self, msg_type) self.breakpoint_list = {} self.stopped_threads = [] self.debugpy_initialized = False self._removed_cleanup = {} self.debugpy_host = '127.0.0.1' self.debugpy_port = 0 self.endpoint = None self.variable_explorer = VariableExplorer() def _handle_event(self, msg): if msg['event'] == 'stopped': self.stopped_threads.append(msg['body']['threadId']) elif msg['event'] == 'continued': try: self.stopped_threads.remove(msg['body']['threadId']) except Exception: pass self.event_callback(msg) async def _forward_message(self, msg): return await self.debugpy_client.send_dap_request(msg) def _build_variables_response(self, request, variables): var_list = [var for var in variables if self.accept_variable(var['name'])] reply = { 'seq': request['seq'], 'type': 'response', 'request_seq': request['seq'], 'success': True, 'command': request['command'], 'body': { 'variables': var_list } } return reply @property def tcp_client(self): return self.debugpy_client def start(self): if not self.debugpy_initialized: tmp_dir = get_tmp_directory() if not os.path.exists(tmp_dir): os.makedirs(tmp_dir) host, port = self.debugpy_client.get_host_port() code = 'import debugpy;' code += 'debugpy.listen(("' + host + '",' + port + '))' content = { 'code': code, 'silent': True } self.session.send(self.shell_socket, 'execute_request', content, None, (self.shell_socket.getsockopt(ROUTING_ID))) ident, msg = self.session.recv(self.shell_socket, mode=0) self.debugpy_initialized = msg['content']['status'] == 'ok' # Don't remove leading empty lines when debugging so the breakpoints are correctly positioned cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms if leading_empty_lines in cleanup_transforms: index = cleanup_transforms.index(leading_empty_lines) self._removed_cleanup[index] = cleanup_transforms.pop(index) self.debugpy_client.connect_tcp_socket() return self.debugpy_initialized def stop(self): self.debugpy_client.disconnect_tcp_socket() # Restore removed cleanup transformers cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms for index in sorted(self._removed_cleanup): func = self._removed_cleanup.pop(index) cleanup_transforms.insert(index, func) async def dumpCell(self, message): code =
message['arguments']['code'] file_name = get_file_name(code) with open(file_name, 'w', encoding='utf-8') as f: f.write(code) reply = { 'type': 'response', 'request_seq': message['seq'], 'success': True, 'command': message['command'], 'body': { 'sourcePath': file_name } } return reply async def setBreakpoints(self, message): source = message["arguments"]["source"]["path"] self.breakpoint_list[source] = message["arguments"]["breakpoints"] return await self._forward_message(message) async def source(self, message): reply = { 'type': 'response', 'request_seq': message['seq'], 'command': message['command'] } source_path = message["arguments"]["source"]["path"] if os.path.isfile(source_path): with open(source_path, encoding='utf-8') as f: reply['success'] = True reply['body'] = { 'content': f.read() } else: reply['success'] = False reply['message'] = 'source unavailable' reply['body'] = {} return reply async def stackTrace(self, message): reply = await self._forward_message(message) # The stackFrames array can have the following content: # { frames from the notebook} # ... # { 'id': xxx, 'name': '<module>', ... } <= this is the first frame of the code from the notebook # { frames from ipykernel } # ... # {'id': yyy, 'name': '<module>', ... } <= this is the first frame of ipykernel code # or only the frames from the notebook. # We want to remove all the frames from ipykernel when they are present. try: sf_list = reply["body"]["stackFrames"] module_idx = len(sf_list) - next( i for i, v in enumerate(reversed(sf_list), 1) if v["name"] == "<module>" and i != 1 ) reply["body"]["stackFrames"] = reply["body"]["stackFrames"][ : module_idx + 1 ] except StopIteration: pass return reply def accept_variable(self, variable_name): forbid_list = [ '__name__', '__doc__', '__package__', '__loader__', '__spec__', '__annotations__', '__builtins__', '__builtin__', '__display__', 'get_ipython', 'debugpy', 'exit', 'quit', 'In', 'Out', '_oh', '_dh', '_', '__', '___' ] cond = variable_name not in forbid_list cond = cond and not bool(re.search(r'^_\d', variable_name)) cond = cond and variable_name[0:2] != '_i' return cond async def variables(self, message): reply = {} if not self.stopped_threads: variables = self.variable_explorer.get_children_variables(message['arguments']['variablesReference']) return self._build_variables_response(message, variables) else: reply = await self._forward_message(message) # TODO : check start and count arguments work as expected in debugpy reply['body']['variables'] = \ [var for var in reply['body']['variables'] if self.accept_variable(var['name'])] return reply async def attach(self, message): host, port = self.debugpy_client.get_host_port() message['arguments']['connect'] = { 'host': host, 'port': port } message['arguments']['logToFile'] = True # That option is reverted for now since it leads to spurious breaks of the code # in ipykernel source and resuming the execution leads to several errors # in the kernel. # Set debugOptions for breakpoints in python standard library source.
# message['arguments']['debugOptions'] = [ 'DebugStdLib' ] return await self._forward_message(message) async def configurationDone(self, message): reply = { 'seq': message['seq'], 'type': 'response', 'request_seq': message['seq'], 'success': True, 'command': message['command'] } return reply async def debugInfo(self, message): breakpoint_list = [] for key, value in self.breakpoint_list.items(): breakpoint_list.append({ 'source': key, 'breakpoints': value }) reply = { 'type': 'response', 'request_seq': message['seq'], 'success': True, 'command': message['command'], 'body': { 'isStarted': self.is_started, 'hashMethod': 'Murmur2', 'hashSeed': get_tmp_hash_seed(), 'tmpFilePrefix': get_tmp_directory() + os.sep, 'tmpFileSuffix': '.py', 'breakpoints': breakpoint_list, 'stoppedThreads': self.stopped_threads, 'richRendering': True, 'exceptionPaths': ['Python Exceptions'] } } return reply async def inspectVariables(self, message): self.variable_explorer.untrack_all() # looks like the implementation of untrack_all in ptvsd # destroys objects we need in track. We have no choice but to # reinstantiate the object self.variable_explorer = VariableExplorer() self.variable_explorer.track() variables = self.variable_explorer.get_children_variables() return self._build_variables_response(message, variables) async def richInspectVariables(self, message): reply = { "type": "response", "request_seq": message["seq"], "success": False, "command": message["command"], } var_name = message["arguments"]["variableName"] valid_name = str.isidentifier(var_name) if not valid_name: reply["body"] = {"data": {}, "metadata": {}} if var_name == "special variables" or var_name == "function variables": reply["success"] = True return reply repr_data = {} repr_metadata = {} if not self.stopped_threads: # The code did not hit a breakpoint, so we use the interpreter # to get the rich representation of the variable result = get_ipython().user_expressions({var_name: var_name})[var_name] if result.get("status", "error") == "ok": repr_data = result.get("data", {}) repr_metadata = result.get("metadata", {}) else: # The code has stopped on a breakpoint, so we use an evaluate # request to get the rich representation of the variable code = f"get_ipython().display_formatter.format({var_name})" frame_id = message["arguments"]["frameId"] seq = message["seq"] reply = await self._forward_message( { "type": "request", "command": "evaluate", "seq": seq + 1, "arguments": {"expression": code, "frameId": frame_id}, } ) if reply["success"]: repr_data, repr_metadata = eval(reply["body"]["result"], {}, {}) body = { "data": repr_data, "metadata": {k: v for k, v in repr_metadata.items() if k in repr_data}, } reply["body"] = body reply["success"] = True return reply async def process_request(self, message): reply = {} if message['command'] == 'initialize': if self.is_started: self.log.info('The debugger has already started') else: self.is_started = self.start() if self.is_started: self.log.info('The debugger has started') else: reply = { 'command': 'initialize', 'request_seq': message['seq'], 'seq': 3, 'success': False, 'type': 'response' } handler = self.static_debug_handlers.get(message['command'], None) if handler is not None: reply = await handler(message) elif self.is_started: handler = self.started_debug_handlers.get(message['command'], None) if handler is not None: reply = await handler(message) else: reply = await self._forward_message(message) if message['command'] == 'disconnect': self.stop() self.breakpoint_list = {} self.stopped_threads = []
self.is_started = False self.log.info('The debugger has stopped') return reply ipykernel-6.7.0/ipykernel/displayhook.py000066400000000000000000000051101417004153500204250ustar00rootroot00000000000000"""Replacements for sys.displayhook that publish over ZMQ.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import builtins import sys from IPython.core.displayhook import DisplayHook from ipykernel.jsonutil import encode_images, json_clean from traitlets import Instance, Dict, Any from jupyter_client.session import extract_header, Session class ZMQDisplayHook: """A simple displayhook that publishes the object's repr over a ZeroMQ socket.""" topic = b'execute_result' def __init__(self, session, pub_socket): self.session = session self.pub_socket = pub_socket self.parent_header = {} def get_execution_count(self): """This method is replaced in kernelapp""" return 0 def __call__(self, obj): if obj is None: return builtins._ = obj sys.stdout.flush() sys.stderr.flush() contents = {'execution_count': self.get_execution_count(), 'data': {'text/plain': repr(obj)}, 'metadata': {}} self.session.send(self.pub_socket, 'execute_result', contents, parent=self.parent_header, ident=self.topic) def set_parent(self, parent): self.parent_header = extract_header(parent) class ZMQShellDisplayHook(DisplayHook): """A displayhook subclass that publishes data using ZeroMQ. This is intended to work with an InteractiveShell instance. It sends a dict of different representations of the object.""" topic=None session = Instance(Session, allow_none=True) pub_socket = Any(allow_none=True) parent_header = Dict({}) def set_parent(self, parent): """Set the parent for outbound messages.""" self.parent_header = extract_header(parent) def start_displayhook(self): self.msg = self.session.msg('execute_result', { 'data': {}, 'metadata': {}, }, parent=self.parent_header) def write_output_prompt(self): """Write the output prompt.""" self.msg['content']['execution_count'] = self.prompt_count def write_format_data(self, format_dict, md_dict=None): self.msg['content']['data'] = json_clean(encode_images(format_dict)) self.msg['content']['metadata'] = md_dict def finish_displayhook(self): """Finish up all displayhook activities.""" sys.stdout.flush() sys.stderr.flush() if self.msg['content']['data']: self.session.send(self.pub_socket, self.msg, ident=self.topic) self.msg = None ipykernel-6.7.0/ipykernel/embed.py000066400000000000000000000040111417004153500171520ustar00rootroot00000000000000"""Simple function for embedding an IPython kernel """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import sys from IPython.utils.frame import extract_module_locals from .kernelapp import IPKernelApp #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- def embed_kernel(module=None, local_ns=None, **kwargs): """Embed and start an IPython kernel in a given scope. Parameters ---------- module : ModuleType, optional The module to load into IPython globals (default: caller) local_ns : dict, optional The namespace to load into IPython user namespace (default: caller) **kwargs : various, optional Further keyword args are relayed to the IPKernelApp constructor, allowing configuration of the Kernel. Will only have an effect on the first embed_kernel call for a given process. 
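Examples
--------
A minimal sketch (the surrounding function and its variables are
hypothetical)::

    from ipykernel.embed import embed_kernel

    def compute():
        result = 42
        # Blocks here serving a kernel; connect a frontend with
        # ``jupyter console --existing`` to inspect ``result``.
        embed_kernel(local_ns=locals())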
""" # get the app if it exists, or set it up if it doesn't if IPKernelApp.initialized(): app = IPKernelApp.instance() else: app = IPKernelApp.instance(**kwargs) app.initialize([]) # Undo unnecessary sys module mangling from init_sys_modules. # This would not be necessary if we could prevent it # in the first place by using a different InteractiveShell # subclass, as in the regular embed case. main = app.kernel.shell._orig_sys_modules_main_mod if main is not None: sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main # load the calling scope if not given (caller_module, caller_locals) = extract_module_locals(1) if module is None: module = caller_module if local_ns is None: local_ns = caller_locals app.kernel.user_module = module app.kernel.user_ns = local_ns app.shell.set_completer_frame() app.start() ipykernel-6.7.0/ipykernel/eventloops.py000066400000000000000000000322021417004153500202770ustar00rootroot00000000000000"""Event loop integration for the ZeroMQ-based kernels.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from functools import partial import os import sys import platform import zmq from distutils.version import LooseVersion as V from traitlets.config.application import Application def _use_appnope(): """Should we use appnope for dealing with OS X app nap? Checks if we are on OS X 10.9 or greater. """ return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9') def _notify_stream_qt(kernel, stream): from IPython.external.qt_for_kernel import QtCore def process_stream_events(): """fall back to main loop when there's a socket event""" # call flush to ensure that the stream doesn't lose events # due to our consuming of the edge-triggered FD # flush returns the number of events consumed. # if there were any, wake it up if stream.flush(limit=1): notifier.setEnabled(False) kernel.app.quit() fd = stream.getsockopt(zmq.FD) notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app) notifier.activated.connect(process_stream_events) # there may already be unprocessed events waiting. # these events will not wake zmq's edge-triggered FD # since edge-triggered notification only occurs on new i/o activity. # process all the waiting events immediately # so we start in a clean state ensuring that any new i/o events will notify. # schedule first call on the eventloop as soon as it's running, # so we don't block here processing events timer = QtCore.QTimer(kernel.app) timer.setSingleShot(True) timer.timeout.connect(process_stream_events) timer.start(0) # mapping of keys to loop functions loop_map = { 'inline': None, 'nbagg': None, 'notebook': None, 'ipympl': None, 'widget': None, None: None, } def register_integration(*toolkitnames): """Decorator to register an event loop to integrate with the IPython kernel The decorator takes names to register the event loop as for the %gui magic. You can provide alternative names for the same toolkit. The decorated function should take a single argument, the IPython kernel instance, arrange for the event loop to call ``kernel.do_one_iteration()`` at least every ``kernel._poll_interval`` seconds, and start the event loop. :mod:`ipykernel.eventloops` provides and registers such functions for a few common event loops. 
""" def decorator(func): for name in toolkitnames: loop_map[name] = func func.exit_hook = lambda kernel: None def exit_decorator(exit_func): """@func.exit is now a decorator to register a function to be called on exit """ func.exit_hook = exit_func return exit_func func.exit = exit_decorator return func return decorator def _loop_qt(app): """Inner-loop for running the Qt eventloop Pulled from guisupport.start_event_loop in IPython < 5.2, since IPython 5.2 only checks `get_ipython().active_eventloop` is defined, rather than if the eventloop is actually running. """ app._in_event_loop = True app.exec_() app._in_event_loop = False @register_integration('qt4') def loop_qt4(kernel): """Start a kernel with PyQt4 event loop integration.""" from IPython.lib.guisupport import get_app_qt4 from IPython.external.qt_for_kernel import QtGui kernel.app = get_app_qt4([" "]) if isinstance(kernel.app, QtGui.QApplication): kernel.app.setQuitOnLastWindowClosed(False) _notify_stream_qt(kernel, kernel.shell_stream) _loop_qt(kernel.app) @register_integration('qt', 'qt5') def loop_qt5(kernel): """Start a kernel with PyQt5 event loop integration.""" if os.environ.get('QT_API', None) is None: try: import PyQt5 os.environ['QT_API'] = 'pyqt5' except ImportError: try: import PySide2 os.environ['QT_API'] = 'pyside2' except ImportError: os.environ['QT_API'] = 'pyqt5' return loop_qt4(kernel) # exit and watch are the same for qt 4 and 5 @loop_qt4.exit @loop_qt5.exit def loop_qt_exit(kernel): kernel.app.exit() def _loop_wx(app): """Inner-loop for running the Wx eventloop Pulled from guisupport.start_event_loop in IPython < 5.2, since IPython 5.2 only checks `get_ipython().active_eventloop` is defined, rather than if the eventloop is actually running. """ app._in_event_loop = True app.MainLoop() app._in_event_loop = False @register_integration('wx') def loop_wx(kernel): """Start a kernel with wx event loop support.""" import wx # Wx uses milliseconds poll_interval = int(1000 * kernel._poll_interval) def wake(): """wake from wx""" if kernel.shell_stream.flush(limit=1): kernel.app.ExitMainLoop() return # We have to put the wx.Timer in a wx.Frame for it to fire properly. # We make the Frame hidden when we create it in the main app below. class TimerFrame(wx.Frame): def __init__(self, func): wx.Frame.__init__(self, None, -1) self.timer = wx.Timer(self) # Units for the timer are in milliseconds self.timer.Start(poll_interval) self.Bind(wx.EVT_TIMER, self.on_timer) self.func = func def on_timer(self, event): self.func() # We need a custom wx.App to create our Frame subclass that has the # wx.Timer to defer back to the tornado event loop. class IPWxApp(wx.App): def OnInit(self): self.frame = TimerFrame(wake) self.frame.Show(False) return True # The redirect=False here makes sure that wx doesn't replace # sys.stdout/stderr with its own classes. if not ( getattr(kernel, 'app', None) and isinstance(kernel.app, wx.App) ): kernel.app = IPWxApp(redirect=False) # The import of wx on Linux sets the handler for signal.SIGINT # to 0. This is a bug in wx or gtk. We fix by just setting it # back to the Python default. 
import signal if not callable(signal.getsignal(signal.SIGINT)): signal.signal(signal.SIGINT, signal.default_int_handler) _loop_wx(kernel.app) @loop_wx.exit def loop_wx_exit(kernel): import wx wx.Exit() @register_integration('tk') def loop_tk(kernel): """Start a kernel with the Tk event loop.""" from tkinter import Tk, READABLE app = Tk() # Capability detection: # per https://docs.python.org/3/library/tkinter.html#file-handlers # file handlers are not available on Windows if hasattr(app, 'createfilehandler'): # A basic wrapper for structural similarity with the Windows version class BasicAppWrapper: def __init__(self, app): self.app = app self.app.withdraw() def process_stream_events(stream, *a, **kw): """fall back to main loop when there's a socket event""" if stream.flush(limit=1): app.tk.deletefilehandler(stream.getsockopt(zmq.FD)) app.quit() # For Tkinter, we create a Tk object and call its withdraw method. kernel.app_wrapper = BasicAppWrapper(app) notifier = partial(process_stream_events, kernel.shell_stream) # seems to be needed for tk notifier.__name__ = "notifier" app.tk.createfilehandler(kernel.shell_stream.getsockopt(zmq.FD), READABLE, notifier) # schedule initial call after start app.after(0, notifier) app.mainloop() else: import asyncio import nest_asyncio nest_asyncio.apply() doi = kernel.do_one_iteration # Tk uses milliseconds poll_interval = int(1000 * kernel._poll_interval) class TimedAppWrapper: def __init__(self, app, func): self.app = app self.app.withdraw() self.func = func def on_timer(self): loop = asyncio.get_event_loop() try: loop.run_until_complete(self.func()) except Exception: kernel.log.exception("Error in message handler") self.app.after(poll_interval, self.on_timer) def start(self): self.on_timer() # Call it once to get things going. self.app.mainloop() kernel.app_wrapper = TimedAppWrapper(app, doi) kernel.app_wrapper.start() @loop_tk.exit def loop_tk_exit(kernel): kernel.app_wrapper.app.destroy() @register_integration('gtk') def loop_gtk(kernel): """Start the kernel, coordinating with the GTK event loop""" from .gui.gtkembed import GTKEmbed gtk_kernel = GTKEmbed(kernel) gtk_kernel.start() kernel._gtk = gtk_kernel @loop_gtk.exit def loop_gtk_exit(kernel): kernel._gtk.stop() @register_integration('gtk3') def loop_gtk3(kernel): """Start the kernel, coordinating with the GTK event loop""" from .gui.gtk3embed import GTKEmbed gtk_kernel = GTKEmbed(kernel) gtk_kernel.start() kernel._gtk = gtk_kernel @loop_gtk3.exit def loop_gtk3_exit(kernel): kernel._gtk.stop() @register_integration('osx') def loop_cocoa(kernel): """Start the kernel, coordinating with the Cocoa CFRunLoop event loop via the matplotlib MacOSX backend. 
""" from ._eventloop_macos import mainloop, stop real_excepthook = sys.excepthook def handle_int(etype, value, tb): """don't let KeyboardInterrupts look like crashes""" # wake the eventloop when we get a signal stop() if etype is KeyboardInterrupt: print("KeyboardInterrupt caught in CFRunLoop", file=sys.__stdout__) else: real_excepthook(etype, value, tb) while not kernel.shell.exit_now: try: # double nested try/except, to properly catch KeyboardInterrupt # due to pyzmq Issue #130 try: # don't let interrupts during mainloop invoke crash_handler: sys.excepthook = handle_int mainloop(kernel._poll_interval) if kernel.shell_stream.flush(limit=1): # events to process, return control to kernel return except BaseException: raise except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel print("KeyboardInterrupt caught in kernel", file=sys.__stdout__) finally: # ensure excepthook is restored sys.excepthook = real_excepthook @loop_cocoa.exit def loop_cocoa_exit(kernel): from ._eventloop_macos import stop stop() @register_integration('asyncio') def loop_asyncio(kernel): '''Start a kernel with asyncio event loop support.''' import asyncio loop = asyncio.get_event_loop() # loop is already running (e.g. tornado 5), nothing left to do if loop.is_running(): return if loop.is_closed(): # main loop is closed, create a new one loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop._should_close = False # pause eventloop when there's an event on a zmq socket def process_stream_events(stream): """fall back to main loop when there's a socket event""" if stream.flush(limit=1): loop.stop() notifier = partial(process_stream_events, kernel.shell_stream) loop.add_reader(kernel.shell_stream.getsockopt(zmq.FD), notifier) loop.call_soon(notifier) while True: error = None try: loop.run_forever() except KeyboardInterrupt: continue except Exception as e: error = e if loop._should_close: loop.close() if error is not None: raise error break @loop_asyncio.exit def loop_asyncio_exit(kernel): """Exit hook for asyncio""" import asyncio loop = asyncio.get_event_loop() @asyncio.coroutine def close_loop(): if hasattr(loop, 'shutdown_asyncgens'): yield from loop.shutdown_asyncgens() loop._should_close = True loop.stop() if loop.is_running(): close_loop() elif not loop.is_closed(): loop.run_until_complete(close_loop) loop.close() def enable_gui(gui, kernel=None): """Enable integration with a given GUI""" if gui not in loop_map: e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys()) raise ValueError(e) if kernel is None: if Application.initialized(): kernel = getattr(Application.instance(), 'kernel', None) if kernel is None: raise RuntimeError("You didn't specify a kernel," " and no IPython Application with a kernel appears to be running." ) loop = loop_map[gui] if loop and kernel.eventloop is not None and kernel.eventloop is not loop: raise RuntimeError("Cannot activate multiple GUI eventloops") kernel.eventloop = loop ipykernel-6.7.0/ipykernel/gui/000077500000000000000000000000001417004153500163145ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/gui/__init__.py000066400000000000000000000011041417004153500204210ustar00rootroot00000000000000"""GUI support for the IPython ZeroMQ kernel. This package contains the various toolkit-dependent utilities we use to enable coordination between the IPython kernel and the event loops of the various GUI toolkits. """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team. 
# # Distributed under the terms of the BSD License. # # The full license is in the file COPYING.txt, distributed as part of this # software. #----------------------------------------------------------------------------- ipykernel-6.7.0/ipykernel/gui/gtk3embed.py000066400000000000000000000062161417004153500205400ustar00rootroot00000000000000"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support. """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # stdlib import sys # Third-party import gi gi.require_version ('Gdk', '3.0') gi.require_version ('Gtk', '3.0') from gi.repository import GObject, Gtk #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class GTKEmbed: """A class to embed a kernel into the GTK main event loop. """ def __init__(self, kernel): self.kernel = kernel # These two will later store the real gtk functions when we hijack them self.gtk_main = None self.gtk_main_quit = None def start(self): """Starts the GTK main event loop and sets our kernel startup routine. """ # Register our function to initiate the kernel and start gtk GObject.idle_add(self._wire_kernel) Gtk.main() def _wire_kernel(self): """Initializes the kernel inside GTK. This is meant to run only once at startup, so it does its job and returns False to ensure it doesn't get run again by GTK. """ self.gtk_main, self.gtk_main_quit = self._hijack_gtk() GObject.timeout_add(int(1000*self.kernel._poll_interval), self.iterate_kernel) return False def iterate_kernel(self): """Run one iteration of the kernel and return True. GTK timer functions must return True to be called again, so we make the call to :meth:`do_one_iteration` and then return True for GTK. """ self.kernel.do_one_iteration() return True def stop(self): # FIXME: this one isn't getting called because we have no reliable # kernel shutdown. We need to fix that: once the kernel has a # shutdown mechanism, it can call this. self.gtk_main_quit() sys.exit() def _hijack_gtk(self): """Hijack a few key functions in GTK for IPython integration. Modifies pyGTK's main and main_quit with a dummy so user code does not block IPython. This allows us to use %run to run arbitrary pygtk scripts from a long-lived IPython session, and when they attempt to start or stop Returns ------- The original functions that have been hijacked: - Gtk.main - Gtk.main_quit """ def dummy(*args, **kw): pass # save and trap main and main_quit from gtk orig_main, Gtk.main = Gtk.main, dummy orig_main_quit, Gtk.main_quit = Gtk.main_quit, dummy return orig_main, orig_main_quit ipykernel-6.7.0/ipykernel/gui/gtkembed.py000066400000000000000000000060631417004153500204550ustar00rootroot00000000000000"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support. """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team # # Distributed under the terms of the BSD License. 
The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # stdlib import sys # Third-party import gobject import gtk #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class GTKEmbed: """A class to embed a kernel into the GTK main event loop. """ def __init__(self, kernel): self.kernel = kernel # These two will later store the real gtk functions when we hijack them self.gtk_main = None self.gtk_main_quit = None def start(self): """Starts the GTK main event loop and sets our kernel startup routine. """ # Register our function to initiate the kernel and start gtk gobject.idle_add(self._wire_kernel) gtk.main() def _wire_kernel(self): """Initializes the kernel inside GTK. This is meant to run only once at startup, so it does its job and returns False to ensure it doesn't get run again by GTK. """ self.gtk_main, self.gtk_main_quit = self._hijack_gtk() gobject.timeout_add(int(1000*self.kernel._poll_interval), self.iterate_kernel) return False def iterate_kernel(self): """Run one iteration of the kernel and return True. GTK timer functions must return True to be called again, so we make the call to :meth:`do_one_iteration` and then return True for GTK. """ self.kernel.do_one_iteration() return True def stop(self): # FIXME: this one isn't getting called because we have no reliable # kernel shutdown. We need to fix that: once the kernel has a # shutdown mechanism, it can call this. self.gtk_main_quit() sys.exit() def _hijack_gtk(self): """Hijack a few key functions in GTK for IPython integration. Modifies pyGTK's main and main_quit with a dummy so user code does not block IPython. This allows us to use %run to run arbitrary pygtk scripts from a long-lived IPython session, and when they attempt to start or stop Returns ------- The original functions that have been hijacked: - gtk.main - gtk.main_quit """ def dummy(*args, **kw): pass # save and trap main and main_quit from gtk orig_main, gtk.main = gtk.main, dummy orig_main_quit, gtk.main_quit = gtk.main_quit, dummy return orig_main, orig_main_quit ipykernel-6.7.0/ipykernel/heartbeat.py000066400000000000000000000102641417004153500200440ustar00rootroot00000000000000"""The client and server for a basic ping-pong style heartbeat. """ #----------------------------------------------------------------------------- # Copyright (C) 2008-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import errno import os import socket from threading import Thread import zmq from jupyter_client.localinterfaces import localhost #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- class Heartbeat(Thread): "A simple ping-pong style heartbeat that runs in a thread." 
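# The socket simply echoes every message back to its sender (see run()
# below, which wires the ROUTER socket to itself through a QUEUE device),
# so a client-side liveness probe is just a round-trip. A minimal sketch,
# assuming the heartbeat is bound to tcp://127.0.0.1:5555 (the address is
# hypothetical):
#
#     import zmq
#     ctx = zmq.Context.instance()
#     req = ctx.socket(zmq.REQ)
#     req.connect("tcp://127.0.0.1:5555")
#     req.send(b"ping")
#     alive = bool(req.poll(timeout=3000)) and req.recv() == b"ping"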
def __init__(self, context, addr=None): if addr is None: addr = ('tcp', localhost(), 0) Thread.__init__(self) self.context = context self.transport, self.ip, self.port = addr self.original_port = self.port if self.original_port == 0: self.pick_port() self.addr = (self.ip, self.port) self.daemon = True self.pydev_do_not_trace = True self.is_pydev_daemon_thread = True def pick_port(self): if self.transport == 'tcp': s = socket.socket() # '*' means all interfaces to 0MQ, which is '' to socket.socket s.bind(('' if self.ip == '*' else self.ip, 0)) self.port = s.getsockname()[1] s.close() elif self.transport == 'ipc': self.port = 1 while os.path.exists("%s-%s" % (self.ip, self.port)): self.port = self.port + 1 else: raise ValueError("Unrecognized zmq transport: %s" % self.transport) return self.port def _try_bind_socket(self): c = ':' if self.transport == 'tcp' else '-' return self.socket.bind('%s://%s' % (self.transport, self.ip) + c + str(self.port)) def _bind_socket(self): try: win_in_use = errno.WSAEADDRINUSE except AttributeError: win_in_use = None # Try up to 100 times to bind a port when in conflict to avoid # infinite attempts in bad setups max_attempts = 1 if self.original_port else 100 for attempt in range(max_attempts): try: self._try_bind_socket() except zmq.ZMQError as ze: if attempt == max_attempts - 1: raise # Raise if we have any error not related to socket binding if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use: raise # Raise if we have any error not related to socket binding if self.original_port == 0: self.pick_port() else: raise else: return def run(self): self.socket = self.context.socket(zmq.ROUTER) self.socket.linger = 1000 try: self._bind_socket() except Exception: self.socket.close() raise while True: try: zmq.device(zmq.QUEUE, self.socket, self.socket) except zmq.ZMQError as e: if e.errno == errno.EINTR: # signal interrupt, resume heartbeat continue elif e.errno == zmq.ETERM: # context terminated, close socket and exit try: self.socket.close() except zmq.ZMQError: # suppress further errors during cleanup # this shouldn't happen, though pass break elif e.errno == zmq.ENOTSOCK: # socket closed elsewhere, exit break else: raise else: break ipykernel-6.7.0/ipykernel/inprocess/000077500000000000000000000000001417004153500175355ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/inprocess/__init__.py000066400000000000000000000003231417004153500216440ustar00rootroot00000000000000from .channels import ( InProcessChannel, InProcessHBChannel, ) from .client import InProcessKernelClient from .manager import InProcessKernelManager from .blocking import BlockingInProcessKernelClient ipykernel-6.7.0/ipykernel/inprocess/blocking.py000066400000000000000000000064741417004153500217120ustar00rootroot00000000000000""" Implements a fully blocking kernel client. Useful for test suites and blocking terminal interfaces. """ #----------------------------------------------------------------------------- # Copyright (C) 2012 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. 
#----------------------------------------------------------------------------- from queue import Queue, Empty import sys # IPython imports from traitlets import Type # Local imports from .channels import ( InProcessChannel, ) from .client import InProcessKernelClient class BlockingInProcessChannel(InProcessChannel): def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self._in_queue = Queue() def call_handlers(self, msg): self._in_queue.put(msg) def get_msg(self, block=True, timeout=None): """ Gets a message if there is one that is ready. """ if timeout is None: # Queue.get(timeout=None) has uninterruptible # behavior, so wait for a week instead timeout = 604800 return self._in_queue.get(block, timeout) def get_msgs(self): """ Get all messages that are currently ready. """ msgs = [] while True: try: msgs.append(self.get_msg(block=False)) except Empty: break return msgs def msg_ready(self): """ Is there a message that has been received? """ return not self._in_queue.empty() class BlockingInProcessStdInChannel(BlockingInProcessChannel): def call_handlers(self, msg): """ Overridden for the in-process channel. This method simply calls raw_input directly. """ msg_type = msg['header']['msg_type'] if msg_type == 'input_request': _raw_input = self.client.kernel._sys_raw_input prompt = msg['content']['prompt'] print(prompt, end='', file=sys.__stdout__) sys.__stdout__.flush() self.client.input(_raw_input()) class BlockingInProcessKernelClient(InProcessKernelClient): # The classes to use for the various channels. shell_channel_class = Type(BlockingInProcessChannel) iopub_channel_class = Type(BlockingInProcessChannel) stdin_channel_class = Type(BlockingInProcessStdInChannel) def wait_for_ready(self): # Wait for kernel info reply on shell channel while True: self.kernel_info() try: msg = self.shell_channel.get_msg(block=True, timeout=1) except Empty: pass else: if msg['msg_type'] == 'kernel_info_reply': # Checking that IOPub is connected. If it is not connected, start over. try: self.iopub_channel.get_msg(block=True, timeout=0.2) except Empty: pass else: self._handle_kernel_info_reply(msg) break # Flush IOPub channel while True: try: msg = self.iopub_channel.get_msg(block=True, timeout=0.2) print(msg['msg_type']) except Empty: break ipykernel-6.7.0/ipykernel/inprocess/channels.py000066400000000000000000000047461417004153500217110ustar00rootroot00000000000000"""A kernel client for in-process kernels.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from jupyter_client.channelsabc import HBChannelABC #----------------------------------------------------------------------------- # Channel classes #----------------------------------------------------------------------------- class InProcessChannel: """Base class for in-process channels.""" proxy_methods = [] def __init__(self, client=None): super().__init__() self.client = client self._is_alive = False def is_alive(self): return self._is_alive def start(self): self._is_alive = True def stop(self): self._is_alive = False def call_handlers(self, msg): """ This method is called in the main thread when a message arrives. Subclasses should override this method to handle incoming messages. """ raise NotImplementedError('call_handlers must be defined in a subclass.') def flush(self, timeout=1.0): pass def call_handlers_later(self, *args, **kwds): """ Call the message handlers later.
The default implementation just calls the handlers immediately, but this method exists so that GUI toolkits can defer calling the handlers until after the event loop has run, as expected by GUI frontends. """ self.call_handlers(*args, **kwds) def process_events(self): """ Process any pending GUI events. This method will never be called from a frontend without an event loop (e.g., a terminal frontend). """ raise NotImplementedError class InProcessHBChannel: """A dummy heartbeat channel interface for in-process kernels. Normally we use the heartbeat to check that the kernel process is alive. When the kernel is in-process, that doesn't make sense, but clients still expect this interface. """ time_to_dead = 3.0 def __init__(self, client=None): super().__init__() self.client = client self._is_alive = False self._pause = True def is_alive(self): return self._is_alive def start(self): self._is_alive = True def stop(self): self._is_alive = False def pause(self): self._pause = True def unpause(self): self._pause = False def is_beating(self): return not self._pause HBChannelABC.register(InProcessHBChannel) ipykernel-6.7.0/ipykernel/inprocess/client.py000066400000000000000000000161441417004153500213730ustar00rootroot00000000000000"""A client for in-process kernels.""" #----------------------------------------------------------------------------- # Copyright (C) 2012 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import asyncio # IPython imports from traitlets import Type, Instance, default from jupyter_client.clientabc import KernelClientABC from jupyter_client.client import KernelClient # Local imports from .channels import ( InProcessChannel, InProcessHBChannel, ) #----------------------------------------------------------------------------- # Main kernel Client class #----------------------------------------------------------------------------- class InProcessKernelClient(KernelClient): """A client for an in-process kernel. This class implements the interface of `jupyter_client.clientabc.KernelClientABC` and allows (asynchronous) frontends to be used seamlessly with an in-process kernel. See `jupyter_client.client.KernelClient` for docstrings. """ # The classes to use for the various channels.
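# (Each channel property below instantiates its class lazily on first # access; the blocking client in blocking.py swaps these Type traits for # queue-backed variants.)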
shell_channel_class = Type(InProcessChannel) iopub_channel_class = Type(InProcessChannel) stdin_channel_class = Type(InProcessChannel) control_channel_class = Type(InProcessChannel) hb_channel_class = Type(InProcessHBChannel) kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel', allow_none=True) #-------------------------------------------------------------------------- # Channel management methods #-------------------------------------------------------------------------- @default('blocking_class') def _default_blocking_class(self): from .blocking import BlockingInProcessKernelClient return BlockingInProcessKernelClient def get_connection_info(self): d = super().get_connection_info() d['kernel'] = self.kernel return d def start_channels(self, *args, **kwargs): super().start_channels() self.kernel.frontends.append(self) @property def shell_channel(self): if self._shell_channel is None: self._shell_channel = self.shell_channel_class(self) return self._shell_channel @property def iopub_channel(self): if self._iopub_channel is None: self._iopub_channel = self.iopub_channel_class(self) return self._iopub_channel @property def stdin_channel(self): if self._stdin_channel is None: self._stdin_channel = self.stdin_channel_class(self) return self._stdin_channel @property def control_channel(self): if self._control_channel is None: self._control_channel = self.control_channel_class(self) return self._control_channel @property def hb_channel(self): if self._hb_channel is None: self._hb_channel = self.hb_channel_class(self) return self._hb_channel # Methods for sending specific messages # ------------------------------------- def execute(self, code, silent=False, store_history=True, user_expressions={}, allow_stdin=None): if allow_stdin is None: allow_stdin = self.allow_stdin content = dict(code=code, silent=silent, store_history=store_history, user_expressions=user_expressions, allow_stdin=allow_stdin) msg = self.session.msg('execute_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def complete(self, code, cursor_pos=None): if cursor_pos is None: cursor_pos = len(code) content = dict(code=code, cursor_pos=cursor_pos) msg = self.session.msg('complete_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def inspect(self, code, cursor_pos=None, detail_level=0): if cursor_pos is None: cursor_pos = len(code) content = dict(code=code, cursor_pos=cursor_pos, detail_level=detail_level, ) msg = self.session.msg('inspect_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def history(self, raw=True, output=False, hist_access_type='range', **kwds): content = dict(raw=raw, output=output, hist_access_type=hist_access_type, **kwds) msg = self.session.msg('history_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def shutdown(self, restart=False): # FIXME: What to do here? raise NotImplementedError('Cannot shutdown in-process kernel') def kernel_info(self): """Request kernel info.""" msg = self.session.msg('kernel_info_request') self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def comm_info(self, target_name=None): """Request a dictionary of valid comms and their targets.""" if target_name is None: content = {} else: content = dict(target_name=target_name) msg = self.session.msg('comm_info_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def input(self, string): if self.kernel is None: raise RuntimeError('Cannot send input reply. 
No kernel exists.') self.kernel.raw_input_str = string def is_complete(self, code): msg = self.session.msg('is_complete_request', {'code': code}) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def _dispatch_to_kernel(self, msg): """ Send a message to the kernel and handle a reply. """ kernel = self.kernel if kernel is None: raise RuntimeError('Cannot send request. No kernel exists.') stream = kernel.shell_stream self.session.send(stream, msg) msg_parts = stream.recv_multipart() loop = asyncio.get_event_loop() loop.run_until_complete(kernel.dispatch_shell(msg_parts)) idents, reply_msg = self.session.recv(stream, copy=False) self.shell_channel.call_handlers_later(reply_msg) def get_shell_msg(self, block=True, timeout=None): return self.shell_channel.get_msg(block, timeout) def get_iopub_msg(self, block=True, timeout=None): return self.iopub_channel.get_msg(block, timeout) def get_stdin_msg(self, block=True, timeout=None): return self.stdin_channel.get_msg(block, timeout) def get_control_msg(self, block=True, timeout=None): return self.control_channel.get_msg(block, timeout) #----------------------------------------------------------------------------- # ABC Registration #----------------------------------------------------------------------------- KernelClientABC.register(InProcessKernelClient) ipykernel-6.7.0/ipykernel/inprocess/constants.py000066400000000000000000000004571417004153500221310ustar00rootroot00000000000000"""Shared constants. """ # Because inprocess communication is not networked, we can use a common Session # key everywhere. This is not just the empty bytestring to avoid tripping # certain security checks in the rest of Jupyter that assume that empty keys # are insecure. INPROCESS_KEY = b'inprocess' ipykernel-6.7.0/ipykernel/inprocess/ipkernel.py000066400000000000000000000153361417004153500217270ustar00rootroot00000000000000"""An in-process kernel""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from contextlib import contextmanager import logging import sys from IPython.core.interactiveshell import InteractiveShellABC from ipykernel.jsonutil import json_clean from traitlets import Any, Enum, Instance, List, Type, default from ipykernel.ipkernel import IPythonKernel from ipykernel.zmqshell import ZMQInteractiveShell from .constants import INPROCESS_KEY from .socket import DummySocket from ..iostream import OutStream, BackgroundSocket, IOPubThread #----------------------------------------------------------------------------- # Main kernel class #----------------------------------------------------------------------------- class InProcessKernel(IPythonKernel): #------------------------------------------------------------------------- # InProcessKernel interface #------------------------------------------------------------------------- # The frontends connected to this kernel. frontends = List( Instance('ipykernel.inprocess.client.InProcessKernelClient', allow_none=True) ) # The GUI environment that the kernel is running under. This need not be # specified for the normal operation of the kernel, but is required for # IPython's GUI support (including pylab). The default is 'inline' because # it is safe under all GUI toolkits.
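# For example (illustrative only), InProcessKernel(gui='qt') would request Qt
# event loop integration, while the default below stays safe everywhere: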
gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'), default_value='inline') raw_input_str = Any() stdout = Any() stderr = Any() #------------------------------------------------------------------------- # Kernel interface #------------------------------------------------------------------------- shell_class = Type(allow_none=True) _underlying_iopub_socket = Instance(DummySocket, ()) iopub_thread = Instance(IOPubThread) shell_stream = Instance(DummySocket, ()) @default('iopub_thread') def _default_iopub_thread(self): thread = IOPubThread(self._underlying_iopub_socket) thread.start() return thread iopub_socket = Instance(BackgroundSocket) @default('iopub_socket') def _default_iopub_socket(self): return self.iopub_thread.background_socket stdin_socket = Instance(DummySocket, ()) def __init__(self, **traits): super().__init__(**traits) self._underlying_iopub_socket.observe(self._io_dispatch, names=['message_sent']) self.shell.kernel = self async def execute_request(self, stream, ident, parent): """ Override for temporary IO redirection. """ with self._redirected_io(): await super().execute_request(stream, ident, parent) def start(self): """ Override registration of dispatchers for streams. """ self.shell.exit_now = False async def _abort_queues(self): """ The in-process kernel doesn't abort requests. """ pass async def _flush_control_queue(self): """No need to flush control queues for in-process""" pass def _input_request(self, prompt, ident, parent, password=False): # Flush output before making the request. self.raw_input_str = None sys.stderr.flush() sys.stdout.flush() # Send the input request. content = json_clean(dict(prompt=prompt, password=password)) msg = self.session.msg('input_request', content, parent) for frontend in self.frontends: if frontend.session.session == parent['header']['session']: frontend.stdin_channel.call_handlers(msg) break else: logging.error('No frontend found for raw_input request') return '' # Await a response. while self.raw_input_str is None: frontend.stdin_channel.process_events() return self.raw_input_str #------------------------------------------------------------------------- # Protected interface #------------------------------------------------------------------------- @contextmanager def _redirected_io(self): """ Temporarily redirect IO to the kernel. """ sys_stdout, sys_stderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = self.stdout, self.stderr yield sys.stdout, sys.stderr = sys_stdout, sys_stderr #------ Trait change handlers -------------------------------------------- def _io_dispatch(self, change): """ Called when a message is sent to the IO socket. 
""" ident, msg = self.session.recv(self.iopub_socket.io_thread.socket, copy=False) for frontend in self.frontends: frontend.iopub_channel.call_handlers(msg) #------ Trait initializers ----------------------------------------------- @default('log') def _default_log(self): return logging.getLogger(__name__) @default('session') def _default_session(self): from jupyter_client.session import Session return Session(parent=self, key=INPROCESS_KEY) @default('shell_class') def _default_shell_class(self): return InProcessInteractiveShell @default('stdout') def _default_stdout(self): return OutStream(self.session, self.iopub_thread, 'stdout', watchfd=False) @default('stderr') def _default_stderr(self): return OutStream(self.session, self.iopub_thread, 'stderr', watchfd=False) #----------------------------------------------------------------------------- # Interactive shell subclass #----------------------------------------------------------------------------- class InProcessInteractiveShell(ZMQInteractiveShell): kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel', allow_none=True) #------------------------------------------------------------------------- # InteractiveShell interface #------------------------------------------------------------------------- def enable_gui(self, gui=None): """Enable GUI integration for the kernel.""" if not gui: gui = self.kernel.gui self.active_eventloop = gui def enable_matplotlib(self, gui=None): """Enable matplotlib integration for the kernel.""" if not gui: gui = self.kernel.gui return super().enable_matplotlib(gui) def enable_pylab(self, gui=None, import_all=True, welcome_message=False): """Activate pylab support at runtime.""" if not gui: gui = self.kernel.gui return super().enable_pylab(gui, import_all, welcome_message) InteractiveShellABC.register(InProcessInteractiveShell) ipykernel-6.7.0/ipykernel/inprocess/manager.py000066400000000000000000000052601417004153500215240ustar00rootroot00000000000000"""A kernel manager for in-process kernels.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from traitlets import Instance, DottedObjectName, default from jupyter_client.managerabc import KernelManagerABC from jupyter_client.manager import KernelManager from jupyter_client.session import Session from .constants import INPROCESS_KEY class InProcessKernelManager(KernelManager): """A manager for an in-process kernel. This class implements the interface of `jupyter_client.kernelmanagerabc.KernelManagerABC` and allows (asynchronous) frontends to be used seamlessly with an in-process kernel. See `jupyter_client.kernelmanager.KernelManager` for docstrings. """ # The kernel process with which the KernelManager is communicating. 
kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel', allow_none=True) # the client class for KM.client() shortcut client_class = DottedObjectName('ipykernel.inprocess.BlockingInProcessKernelClient') @default('blocking_class') def _default_blocking_class(self): from .blocking import BlockingInProcessKernelClient return BlockingInProcessKernelClient @default('session') def _default_session(self): # don't sign in-process messages return Session(key=INPROCESS_KEY, parent=self) #-------------------------------------------------------------------------- # Kernel management methods #-------------------------------------------------------------------------- def start_kernel(self, **kwds): from ipykernel.inprocess.ipkernel import InProcessKernel self.kernel = InProcessKernel(parent=self, session=self.session) def shutdown_kernel(self): self.kernel.iopub_thread.stop() self._kill_kernel() def restart_kernel(self, now=False, **kwds): self.shutdown_kernel() self.start_kernel(**kwds) @property def has_kernel(self): return self.kernel is not None def _kill_kernel(self): self.kernel = None def interrupt_kernel(self): raise NotImplementedError("Cannot interrupt in-process kernel.") def signal_kernel(self, signum): raise NotImplementedError("Cannot signal in-process kernel.") def is_alive(self): return self.kernel is not None def client(self, **kwargs): kwargs['kernel'] = self.kernel return super().client(**kwargs) #----------------------------------------------------------------------------- # ABC Registration #----------------------------------------------------------------------------- KernelManagerABC.register(InProcessKernelManager) ipykernel-6.7.0/ipykernel/inprocess/socket.py000066400000000000000000000024371417004153500214050ustar00rootroot00000000000000""" Defines a dummy socket implementing (part of) the zmq.Socket interface. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from queue import Queue import zmq from traitlets import HasTraits, Instance, Int #----------------------------------------------------------------------------- # Dummy socket class #----------------------------------------------------------------------------- class DummySocket(HasTraits): """ A dummy socket implementing (part of) the zmq.Socket interface. """ queue = Instance(Queue, ()) message_sent = Int(0) # Should be an Event context = Instance(zmq.Context) def _context_default(self): return zmq.Context() #------------------------------------------------------------------------- # Socket interface #------------------------------------------------------------------------- def recv_multipart(self, flags=0, copy=True, track=False): return self.queue.get_nowait() def send_multipart(self, msg_parts, flags=0, copy=True, track=False): msg_parts = list(map(zmq.Message, msg_parts)) self.queue.put_nowait(msg_parts) self.message_sent += 1 def flush(self, timeout=1.0): """no-op to comply with stream API""" pass ipykernel-6.7.0/ipykernel/inprocess/tests/000077500000000000000000000000001417004153500206775ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/inprocess/tests/__init__.py000066400000000000000000000000001417004153500227760ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/inprocess/tests/test_kernel.py000066400000000000000000000101441417004153500235700ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
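# These tests exercise the in-process kernel end to end. They are normally
# collected by pytest, e.g. (assuming the package's test extras are
# installed):
#
#     pytest ipykernel/inprocess/tests/test_kernel.py -v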
from io import StringIO import sys import unittest import pytest import tornado from ipykernel.inprocess.blocking import BlockingInProcessKernelClient from ipykernel.inprocess.manager import InProcessKernelManager from ipykernel.inprocess.ipkernel import InProcessKernel from ipykernel.tests.utils import assemble_output from IPython.utils.io import capture_output def _init_asyncio_patch(): """set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if sys.platform.startswith("win") and sys.version_info >= (3, 8) and tornado.version_info < (6, 1): import asyncio try: from asyncio import ( WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) class InProcessKernelTestCase(unittest.TestCase): def setUp(self): _init_asyncio_patch() self.km = InProcessKernelManager() self.km.start_kernel() self.kc = self.km.client() self.kc.start_channels() self.kc.wait_for_ready() def test_pylab(self): """Does %pylab work in the in-process kernel?""" matplotlib = pytest.importorskip('matplotlib', reason='This test requires matplotlib') kc = self.kc kc.execute('%pylab') out, err = assemble_output(kc.get_iopub_msg) self.assertIn('matplotlib', out) def test_raw_input(self): """ Does the in-process kernel handle raw_input correctly? """ io = StringIO('foobar\n') sys_stdin = sys.stdin sys.stdin = io try: self.kc.execute('x = input()') finally: sys.stdin = sys_stdin assert self.km.kernel.shell.user_ns.get('x') == 'foobar' @pytest.mark.skipif( '__pypy__' in sys.builtin_module_names, reason="fails on pypy" ) def test_stdout(self): """ Does the in-process kernel correctly capture IO? 
""" kernel = InProcessKernel() with capture_output() as io: kernel.shell.run_cell('print("foo")') assert io.stdout == 'foo\n' kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session) kernel.frontends.append(kc) kc.execute('print("bar")') out, err = assemble_output(kc.get_iopub_msg) assert out == 'bar\n' @pytest.mark.skip( reason="Currently don't capture during test as pytest does its own capturing" ) def test_capfd(self): """Does correctly capture fd""" kernel = InProcessKernel() with capture_output() as io: kernel.shell.run_cell('print("foo")') assert io.stdout == "foo\n" kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session) kernel.frontends.append(kc) kc.execute("import os") kc.execute('os.system("echo capfd")') out, err = assemble_output(kc.iopub_channel) assert out == "capfd\n" def test_getpass_stream(self): "Tests that kernel getpass accept the stream parameter" kernel = InProcessKernel() kernel._allow_stdin = True kernel._input_request = lambda *args, **kwargs : None kernel.getpass(stream='non empty') ipykernel-6.7.0/ipykernel/inprocess/tests/test_kernelmanager.py000066400000000000000000000064311417004153500251270ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import unittest from ipykernel.inprocess.manager import InProcessKernelManager #----------------------------------------------------------------------------- # Test case #----------------------------------------------------------------------------- class InProcessKernelManagerTestCase(unittest.TestCase): def setUp(self): self.km = InProcessKernelManager() def tearDown(self): if self.km.has_kernel: self.km.shutdown_kernel() def test_interface(self): """ Does the in-process kernel manager implement the basic KM interface? """ km = self.km assert not km.has_kernel km.start_kernel() assert km.has_kernel assert km.kernel is not None kc = km.client() assert not kc.channels_running kc.start_channels() assert kc.channels_running old_kernel = km.kernel km.restart_kernel() self.assertIsNotNone(km.kernel) assert km.kernel != old_kernel km.shutdown_kernel() assert not km.has_kernel self.assertRaises(NotImplementedError, km.interrupt_kernel) self.assertRaises(NotImplementedError, km.signal_kernel, 9) kc.stop_channels() assert not kc.channels_running def test_execute(self): """ Does executing code in an in-process kernel work? """ km = self.km km.start_kernel() kc = km.client() kc.start_channels() kc.wait_for_ready() kc.execute('foo = 1') assert km.kernel.shell.user_ns['foo'] == 1 def test_complete(self): """ Does requesting completion from an in-process kernel work? """ km = self.km km.start_kernel() kc = km.client() kc.start_channels() kc.wait_for_ready() km.kernel.shell.push({'my_bar': 0, 'my_baz': 1}) kc.complete('my_ba', 5) msg = kc.get_shell_msg() assert msg['header']['msg_type'] == 'complete_reply' self.assertEqual(sorted(msg['content']['matches']), ['my_bar', 'my_baz']) def test_inspect(self): """ Does requesting object information from an in-process kernel work? """ km = self.km km.start_kernel() kc = km.client() kc.start_channels() kc.wait_for_ready() km.kernel.shell.user_ns['foo'] = 1 kc.inspect('foo') msg = kc.get_shell_msg() assert msg['header']['msg_type'] == 'inspect_reply' content = msg['content'] assert content['found'] text = content['data']['text/plain'] self.assertIn('int', text) def test_history(self): """ Does requesting history from an in-process kernel work? 
""" km = self.km km.start_kernel() kc = km.client() kc.start_channels() kc.wait_for_ready() kc.execute('1') kc.history(hist_access_type='tail', n=1) msg = kc.shell_channel.get_msgs()[-1] assert msg['header']['msg_type'] == 'history_reply' history = msg['content']['history'] assert len(history) == 1 assert history[0][2] == '1' if __name__ == '__main__': unittest.main() ipykernel-6.7.0/ipykernel/iostream.py000066400000000000000000000461351417004153500177360ustar00rootroot00000000000000"""Wrappers for forwarding stdout/stderr over zmq""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import atexit from binascii import b2a_hex from collections import deque from imp import lock_held as import_lock_held import os import sys import threading import warnings from weakref import WeakSet import traceback from io import StringIO, TextIOBase import io import zmq if zmq.pyzmq_version_info() >= (17, 0): from tornado.ioloop import IOLoop else: # deprecated since pyzmq 17 from zmq.eventloop.ioloop import IOLoop from zmq.eventloop.zmqstream import ZMQStream from jupyter_client.session import extract_header #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- MASTER = 0 CHILD = 1 #----------------------------------------------------------------------------- # IO classes #----------------------------------------------------------------------------- class IOPubThread: """An object for sending IOPub messages in a background thread Prevents a blocking main thread from delaying output from threads. IOPubThread(pub_socket).background_socket is a Socket-API-providing object whose IO is always run in a thread. """ def __init__(self, socket, pipe=False): """Create IOPub thread Parameters ---------- socket : zmq.PUB Socket the socket on which messages will be sent. pipe : bool Whether this process should listen for IOPub messages piped from subprocesses. 
""" self.socket = socket self.background_socket = BackgroundSocket(self) self._master_pid = os.getpid() self._pipe_flag = pipe self.io_loop = IOLoop(make_current=False) if pipe: self._setup_pipe_in() self._local = threading.local() self._events = deque() self._event_pipes = WeakSet() self._setup_event_pipe() self.thread = threading.Thread(target=self._thread_main) self.thread.daemon = True self.thread.pydev_do_not_trace = True self.thread.is_pydev_daemon_thread = True def _thread_main(self): """The inner loop that's actually run in a thread""" self.io_loop.make_current() self.io_loop.start() self.io_loop.close(all_fds=True) def _setup_event_pipe(self): """Create the PULL socket listening for events that should fire in this thread.""" ctx = self.socket.context pipe_in = ctx.socket(zmq.PULL) pipe_in.linger = 0 _uuid = b2a_hex(os.urandom(16)).decode('ascii') iface = self._event_interface = 'inproc://%s' % _uuid pipe_in.bind(iface) self._event_puller = ZMQStream(pipe_in, self.io_loop) self._event_puller.on_recv(self._handle_event) @property def _event_pipe(self): """thread-local event pipe for signaling events that should be processed in the thread""" try: event_pipe = self._local.event_pipe except AttributeError: # new thread, new event pipe ctx = self.socket.context event_pipe = ctx.socket(zmq.PUSH) event_pipe.linger = 0 event_pipe.connect(self._event_interface) self._local.event_pipe = event_pipe # WeakSet so that event pipes will be closed by garbage collection # when their threads are terminated self._event_pipes.add(event_pipe) return event_pipe def _handle_event(self, msg): """Handle an event on the event pipe Content of the message is ignored. Whenever *an* event arrives on the event stream, *all* waiting events are processed in order. """ # freeze event count so new writes don't extend the queue # while we are processing n_events = len(self._events) for i in range(n_events): event_f = self._events.popleft() event_f() def _setup_pipe_in(self): """setup listening pipe for IOPub from forked subprocesses""" ctx = self.socket.context # use UUID to authenticate pipe messages self._pipe_uuid = os.urandom(16) pipe_in = ctx.socket(zmq.PULL) pipe_in.linger = 0 try: self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1") except zmq.ZMQError as e: warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e + "\nsubprocess output will be unavailable." ) self._pipe_flag = False pipe_in.close() return self._pipe_in = ZMQStream(pipe_in, self.io_loop) self._pipe_in.on_recv(self._handle_pipe_msg) def _handle_pipe_msg(self, msg): """handle a pipe message from a subprocess""" if not self._pipe_flag or not self._is_master_process(): return if msg[0] != self._pipe_uuid: print("Bad pipe message: %s", msg, file=sys.__stderr__) return self.send_multipart(msg[1:]) def _setup_pipe_out(self): # must be new context after fork ctx = zmq.Context() pipe_out = ctx.socket(zmq.PUSH) pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port) return ctx, pipe_out def _is_master_process(self): return os.getpid() == self._master_pid def _check_mp_mode(self): """check for forks, and switch to zmq pipeline if necessary""" if not self._pipe_flag or self._is_master_process(): return MASTER else: return CHILD def start(self): """Start the IOPub thread""" self.thread.start() # make sure we don't prevent process exit # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be. 
atexit.register(self.stop) def stop(self): """Stop the IOPub thread""" if not self.thread.is_alive(): return self.io_loop.add_callback(self.io_loop.stop) self.thread.join() # close *all* event pipes, created in any thread # event pipes can only be used from other threads while self.thread.is_alive() # so after thread.join, this should be safe for event_pipe in self._event_pipes: event_pipe.close() def close(self): if self.closed: return self.socket.close() self.socket = None @property def closed(self): return self.socket is None def schedule(self, f): """Schedule a function to be called in our IO thread. If the thread is not running, call immediately. """ if self.thread.is_alive(): self._events.append(f) # wake event thread (message content is ignored) self._event_pipe.send(b'') else: f() def send_multipart(self, *args, **kwargs): """send_multipart schedules actual zmq send in my thread. If my thread isn't running (e.g. forked process), send immediately. """ self.schedule(lambda : self._really_send(*args, **kwargs)) def _really_send(self, msg, *args, **kwargs): """The callback that actually sends messages""" mp_mode = self._check_mp_mode() if mp_mode != CHILD: # we are master, do a regular send self.socket.send_multipart(msg, *args, **kwargs) else: # we are a child, pipe to master # new context/socket for every pipe-out # since forks don't teardown politely, use ctx.term to ensure send has completed ctx, pipe_out = self._setup_pipe_out() pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs) pipe_out.close() ctx.term() class BackgroundSocket: """Wrapper around IOPub thread that provides zmq send[_multipart]""" io_thread = None def __init__(self, io_thread): self.io_thread = io_thread def __getattr__(self, attr): """Wrap socket attr access for backward-compatibility""" if attr.startswith('__') and attr.endswith('__'): # don't wrap magic methods super().__getattr__(attr) if hasattr(self.io_thread.socket, attr): warnings.warn( f"Accessing zmq Socket attribute {attr} on BackgroundSocket" f" is deprecated since ipykernel 4.3.0" f" use .io_thread.socket.{attr}", DeprecationWarning, stacklevel=2, ) return getattr(self.io_thread.socket, attr) super().__getattr__(attr) def __setattr__(self, attr, value): if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')): super().__setattr__(attr, value) else: warnings.warn( f"Setting zmq Socket attribute {attr} on BackgroundSocket" f" is deprecated since ipykernel 4.3.0" f" use .io_thread.socket.{attr}", DeprecationWarning, stacklevel=2, ) setattr(self.io_thread.socket, attr, value) def send(self, msg, *args, **kwargs): return self.send_multipart([msg], *args, **kwargs) def send_multipart(self, *args, **kwargs): """Schedule send in IO thread""" return self.io_thread.send_multipart(*args, **kwargs) class OutStream(TextIOBase): """A file like object that publishes the stream to a 0MQ PUB socket. Output is handed off to an IO Thread """ # timeout for flush to avoid infinite hang # in case of misbehavior flush_timeout = 10 # The time interval between automatic flushes, in seconds. flush_interval = 0.2 topic = None encoding = 'UTF-8' def fileno(self): """ Things like subprocess will peek at and write to the fileno() of stderr/stdout. """ if getattr(self, "_original_stdstream_copy", None) is not None: return self._original_stdstream_copy else: raise io.UnsupportedOperation("fileno") def _watch_pipe_fd(self): """ We've redirected the standard streams stdout and stderr into a pipe. We need to watch in a thread and redirect them to the right places.
1) the ZMQ channels to show in notebook interfaces, 2) the original stdout/err, to capture errors in terminals. We cannot schedule this on the ioloop thread, as this might be blocking. """ try: bts = os.read(self._fid, 1000) while bts and self._should_watch: self.write(bts.decode()) os.write(self._original_stdstream_copy, bts) bts = os.read(self._fid, 1000) except Exception: self._exc = sys.exc_info() def __init__( self, session, pub_thread, name, pipe=None, echo=None, *, watchfd=True, isatty=False, ): """ Parameters ---------- name : str {'stderr', 'stdout'} the name of the standard stream to replace watchfd : bool (default, True) Watch the file descriptor corresponding to the replaced stream. This is useful if you know some underlying code will write directly to the file descriptor by its number. It will spawn a watching thread that will swap the given file descriptor for a pipe, read from the pipe, and insert this into the current Stream. isatty : bool (default, False) Indication of whether this stream has terminal capabilities (e.g. can handle colors) """ if pipe is not None: warnings.warn( "pipe argument to OutStream is deprecated and ignored" " since ipykernel 4.2.3.", DeprecationWarning, stacklevel=2, ) # This is necessary for compatibility with Python built-in streams self.session = session if not isinstance(pub_thread, IOPubThread): # Backward-compat: given socket, not thread. Wrap in a thread. warnings.warn( "Since IPykernel 4.3, OutStream should be created with " "IOPubThread, not %r" % pub_thread, DeprecationWarning, stacklevel=2, ) pub_thread = IOPubThread(pub_thread) pub_thread.start() self.pub_thread = pub_thread self.name = name self.topic = b"stream." + name.encode() self.parent_header = {} self._master_pid = os.getpid() self._flush_pending = False self._subprocess_flush_pending = False self._io_loop = pub_thread.io_loop self._new_buffer() self.echo = None self._isatty = bool(isatty) if ( watchfd and (sys.platform.startswith("linux") or sys.platform.startswith("darwin")) and ("PYTEST_CURRENT_TEST" not in os.environ) ): # Pytest sets its own capture. Don't redirect from within pytest. self._should_watch = True self._setup_stream_redirects(name) if echo: if hasattr(echo, 'read') and hasattr(echo, 'write'): self.echo = echo else: raise ValueError("echo argument must be a file like object") def isatty(self): """Return a bool indicating whether this is an 'interactive' stream. Returns: Boolean """ return self._isatty def _setup_stream_redirects(self, name): pr, pw = os.pipe() fno = getattr(sys, name).fileno() self._original_stdstream_copy = os.dup(fno) os.dup2(pw, fno) self._fid = pr self._exc = None self.watch_fd_thread = threading.Thread(target=self._watch_pipe_fd) self.watch_fd_thread.daemon = True self.watch_fd_thread.start() def _is_master_process(self): return os.getpid() == self._master_pid def set_parent(self, parent): self.parent_header = extract_header(parent) def close(self): if sys.platform.startswith("linux") or sys.platform.startswith("darwin"): self._should_watch = False self.watch_fd_thread.join() if self._exc: etype, value, tb = self._exc traceback.print_exception(etype, value, tb) self.pub_thread = None @property def closed(self): return self.pub_thread is None def _schedule_flush(self): """schedule a flush in the IO thread call this on write, to indicate that flush should be called soon.
""" if self._flush_pending: return self._flush_pending = True # add_timeout has to be handed to the io thread via event pipe def _schedule_in_thread(): self._io_loop.call_later(self.flush_interval, self._flush) self.pub_thread.schedule(_schedule_in_thread) def flush(self): """trigger actual zmq send send will happen in the background thread """ if self.pub_thread and self.pub_thread.thread is not None and self.pub_thread.thread.is_alive(): # request flush on the background thread self.pub_thread.schedule(self._flush) # wait for flush to actually get through, if we can. # waiting across threads during import can cause deadlocks # so only wait if import lock is not held if not import_lock_held(): evt = threading.Event() self.pub_thread.schedule(evt.set) # and give a timeout to avoid if not evt.wait(self.flush_timeout): # write directly to __stderr__ instead of warning because # if this is happening sys.stderr may be the problem. print("IOStream.flush timed out", file=sys.__stderr__) else: self._flush() def _flush(self): """This is where the actual send happens. _flush should generally be called in the IO thread, unless the thread has been destroyed (e.g. forked subprocess). """ self._flush_pending = False self._subprocess_flush_pending = False if self.echo is not None: try: self.echo.flush() except OSError as e: if self.echo is not sys.__stderr__: print(f"Flush failed: {e}", file=sys.__stderr__) data = self._flush_buffer() if data: # FIXME: this disables Session's fork-safe check, # since pub_thread is itself fork-safe. # There should be a better way to do this. self.session.pid = os.getpid() content = {'name':self.name, 'text':data} self.session.send(self.pub_thread, 'stream', content=content, parent=self.parent_header, ident=self.topic) def write(self, string: str) -> int: """Write to current stream after encoding if necessary Returns ------- len : int number of items from input parameter written to stream. """ if not isinstance(string, str): raise TypeError( f"write() argument must be str, not {type(string)}" ) if self.echo is not None: try: self.echo.write(string) except OSError as e: if self.echo is not sys.__stderr__: print(f"Write failed: {e}", file=sys.__stderr__) if self.pub_thread is None: raise ValueError('I/O operation on closed file') else: is_child = (not self._is_master_process()) # only touch the buffer in the IO thread to avoid races self.pub_thread.schedule(lambda: self._buffer.write(string)) if is_child: # mp.Pool cannot be trusted to flush promptly (or ever), # and this helps. if self._subprocess_flush_pending: return self._subprocess_flush_pending = True # We can not rely on self._io_loop.call_later from a subprocess self.pub_thread.schedule(self._flush) else: self._schedule_flush() return len(string) def writelines(self, sequence): if self.pub_thread is None: raise ValueError('I/O operation on closed file') else: for string in sequence: self.write(string) def writable(self): return True def _flush_buffer(self): """clear the current buffer and return the current buffer data. This should only be called in the IO thread. 
""" data = '' if self._buffer is not None: buf = self._buffer self._new_buffer() data = buf.getvalue() buf.close() return data def _new_buffer(self): self._buffer = StringIO() ipykernel-6.7.0/ipykernel/ipkernel.py000066400000000000000000000514641417004153500177250ustar00rootroot00000000000000"""The IPython kernel implementation""" import asyncio import builtins from contextlib import contextmanager from functools import partial import getpass import signal import sys from IPython.core import release from IPython.utils.tokenutil import token_at_cursor, line_at_cursor from traitlets import Instance, Type, Any, List, Bool, observe, observe_compat from zmq.eventloop.zmqstream import ZMQStream from .comm import CommManager from .kernelbase import Kernel as KernelBase from .zmqshell import ZMQInteractiveShell from .eventloops import _use_appnope from .compiler import XCachingCompiler try: from IPython.core.interactiveshell import _asyncio_runner except ImportError: _asyncio_runner = None try: from IPython.core.completer import ( rectify_completions as _rectify_completions, provisionalcompleter as _provisionalcompleter, ) _use_experimental_60_completion = True except ImportError: _use_experimental_60_completion = False try: import debugpy from .debugger import Debugger _is_debugpy_available = True except ImportError: _is_debugpy_available = False _EXPERIMENTAL_KEY_NAME = '_jupyter_types_experimental' class IPythonKernel(KernelBase): shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True) shell_class = Type(ZMQInteractiveShell) use_experimental_completions = Bool(True, help="Set this flag to False to deactivate the use of experimental IPython completion APIs.", ).tag(config=True) debugpy_stream = Instance(ZMQStream, allow_none=True) if _is_debugpy_available else None user_module = Any() @observe('user_module') @observe_compat def _user_module_changed(self, change): if self.shell is not None: self.shell.user_module = change['new'] user_ns = Instance(dict, args=None, allow_none=True) @observe('user_ns') @observe_compat def _user_ns_changed(self, change): if self.shell is not None: self.shell.user_ns = change['new'] self.shell.init_user_ns() # A reference to the Python builtin 'raw_input' function. 
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3) _sys_raw_input = Any() _sys_eval_input = Any() def __init__(self, **kwargs): super().__init__(**kwargs) # Initialize the Debugger if _is_debugpy_available: self.debugger = Debugger(self.log, self.debugpy_stream, self._publish_debug_event, self.debug_shell_socket, self.session) # Initialize the InteractiveShell subclass self.shell = self.shell_class.instance(parent=self, profile_dir = self.profile_dir, user_module = self.user_module, user_ns = self.user_ns, kernel = self, compiler_class = XCachingCompiler, ) self.shell.displayhook.session = self.session self.shell.displayhook.pub_socket = self.iopub_socket self.shell.displayhook.topic = self._topic('execute_result') self.shell.display_pub.session = self.session self.shell.display_pub.pub_socket = self.iopub_socket self.comm_manager = CommManager(parent=self, kernel=self) self.shell.configurables.append(self.comm_manager) comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ] for msg_type in comm_msg_types: self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type) if _use_appnope() and self._darwin_app_nap: # Disable app-nap as the kernel is not a gui but can have guis import appnope appnope.nope() help_links = List([ { 'text': "Python Reference", 'url': "https://docs.python.org/%i.%i" % sys.version_info[:2], }, { 'text': "IPython Reference", 'url': "https://ipython.org/documentation.html", }, { 'text': "NumPy Reference", 'url': "https://docs.scipy.org/doc/numpy/reference/", }, { 'text': "SciPy Reference", 'url': "https://docs.scipy.org/doc/scipy/reference/", }, { 'text': "Matplotlib Reference", 'url': "https://matplotlib.org/contents.html", }, { 'text': "SymPy Reference", 'url': "http://docs.sympy.org/latest/index.html", }, { 'text': "pandas Reference", 'url': "https://pandas.pydata.org/pandas-docs/stable/", }, ]).tag(config=True) # Kernel info fields implementation = 'ipython' implementation_version = release.version language_info = { 'name': 'python', 'version': sys.version.split()[0], 'mimetype': 'text/x-python', 'codemirror_mode': { 'name': 'ipython', 'version': sys.version_info[0] }, 'pygments_lexer': 'ipython%d' % 3, 'nbconvert_exporter': 'python', 'file_extension': '.py' } def dispatch_debugpy(self, msg): if _is_debugpy_available: # The first frame is the socket id, we can drop it frame = msg[1].bytes.decode('utf-8') self.log.debug("Debugpy received: %s", frame) self.debugger.tcp_client.receive_dap_frame(frame) @property def banner(self): return self.shell.banner def start(self): self.shell.exit_now = False if self.debugpy_stream is None: self.log.warning("debugpy_stream undefined, debugging will not be enabled") else: self.debugpy_stream.on_recv(self.dispatch_debugpy, copy=False) super().start() def set_parent(self, ident, parent, channel='shell'): """Overridden from parent to tell the display hook and output streams about the parent message. """ super().set_parent(ident, parent, channel) if channel == 'shell': self.shell.set_parent(parent) def init_metadata(self, parent): """Initialize metadata. Run at the beginning of each execution request. """ md = super().init_metadata(parent) # FIXME: remove deprecated ipyparallel-specific code # This is required for ipyparallel < 5.0 md.update({ 'dependencies_met' : True, 'engine' : self.ident, }) return md def finish_metadata(self, parent, metadata, reply_content): """Finish populating metadata. Run after completing an execution request. 
""" # FIXME: remove deprecated ipyparallel-specific code # This is required by ipyparallel < 5.0 metadata["status"] = reply_content["status"] if ( reply_content["status"] == "error" and reply_content["ename"] == "UnmetDependency" ): metadata["dependencies_met"] = False return metadata def _forward_input(self, allow_stdin=False): """Forward raw_input and getpass to the current frontend. via input_request """ self._allow_stdin = allow_stdin self._sys_raw_input = builtins.input builtins.input = self.raw_input self._save_getpass = getpass.getpass getpass.getpass = self.getpass def _restore_input(self): """Restore raw_input, getpass""" builtins.input = self._sys_raw_input getpass.getpass = self._save_getpass @property def execution_count(self): return self.shell.execution_count @execution_count.setter def execution_count(self, value): # Ignore the incrementing done by KernelBase, in favour of our shell's # execution counter. pass @contextmanager def _cancel_on_sigint(self, future): """ContextManager for capturing SIGINT and cancelling a future SIGINT raises in the event loop when running async code, but we want it to halt a coroutine. Ideally, it would raise KeyboardInterrupt, but this turns it into a CancelledError. At least it gets a decent traceback to the user. """ sigint_future = asyncio.Future() # whichever future finishes first, # cancel the other one def cancel_unless_done(f, _ignored): if f.cancelled() or f.done(): return f.cancel() # when sigint finishes, # abort the coroutine with CancelledError sigint_future.add_done_callback( partial(cancel_unless_done, future) ) # when the main future finishes, # stop watching for SIGINT events future.add_done_callback( partial(cancel_unless_done, sigint_future) ) def handle_sigint(*args): def set_sigint_result(): if sigint_future.cancelled() or sigint_future.done(): return sigint_future.set_result(1) # use add_callback for thread safety self.io_loop.add_callback(set_sigint_result) # set the custom sigint hander during this context save_sigint = signal.signal(signal.SIGINT, handle_sigint) try: yield finally: # restore the previous sigint handler signal.signal(signal.SIGINT, save_sigint) async def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): shell = self.shell # we'll need this a lot here self._forward_input(allow_stdin) reply_content = {} if hasattr(shell, 'run_cell_async') and hasattr(shell, 'should_run_async'): run_cell = shell.run_cell_async should_run_async = shell.should_run_async else: should_run_async = lambda cell: False # older IPython, # use blocking run_cell and wrap it in coroutine async def run_cell(*args, **kwargs): return shell.run_cell(*args, **kwargs) try: # default case: runner is asyncio and asyncio is already running # TODO: this should check every case for "are we inside the runner", # not just asyncio preprocessing_exc_tuple = None try: transformed_cell = self.shell.transform_cell(code) except Exception: transformed_cell = code preprocessing_exc_tuple = sys.exc_info() if ( _asyncio_runner and shell.loop_runner is _asyncio_runner and asyncio.get_event_loop().is_running() and should_run_async( code, transformed_cell=transformed_cell, preprocessing_exc_tuple=preprocessing_exc_tuple, ) ): coro = run_cell( code, store_history=store_history, silent=silent, transformed_cell=transformed_cell, preprocessing_exc_tuple=preprocessing_exc_tuple ) coro_future = asyncio.ensure_future(coro) with self._cancel_on_sigint(coro_future): res = None try: res = await coro_future finally: 
shell.events.trigger('post_execute') if not silent: shell.events.trigger('post_run_cell', res) else: # runner isn't already running, # make synchronous call, # letting shell dispatch to loop runners res = shell.run_cell(code, store_history=store_history, silent=silent) finally: self._restore_input() if res.error_before_exec is not None: err = res.error_before_exec else: err = res.error_in_exec if res.success: reply_content['status'] = 'ok' else: reply_content['status'] = 'error' reply_content.update({ 'traceback': shell._last_traceback or [], 'ename': str(type(err).__name__), 'evalue': str(err), }) # FIXME: deprecated piece for ipyparallel (remove in 5.0): e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='execute') reply_content['engine_info'] = e_info # Return the execution counter so clients can display prompts reply_content['execution_count'] = shell.execution_count - 1 if 'traceback' in reply_content: self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback'])) # At this point, we can tell whether the main code execution succeeded # or not. If it did, we proceed to evaluate user_expressions if reply_content['status'] == 'ok': reply_content['user_expressions'] = \ shell.user_expressions(user_expressions or {}) else: # If there was an error, don't even try to compute expressions reply_content['user_expressions'] = {} # Payloads should be retrieved regardless of outcome, so we can both # recover partial output (that could have been generated early in a # block, before an error) and always clear the payload system. reply_content['payload'] = shell.payload_manager.read_payload() # Be aggressive about clearing the payload because we don't want # it to sit in memory until the next execute_request comes in. shell.payload_manager.clear_payload() return reply_content def do_complete(self, code, cursor_pos): if _use_experimental_60_completion and self.use_experimental_completions: return self._experimental_do_complete(code, cursor_pos) # FIXME: IPython completers currently assume single line, # but completion messages give multi-line context # For now, extract line from cell, based on cursor_pos: if cursor_pos is None: cursor_pos = len(code) line, offset = line_at_cursor(code, cursor_pos) line_cursor = cursor_pos - offset txt, matches = self.shell.complete('', line, line_cursor) return {'matches' : matches, 'cursor_end' : cursor_pos, 'cursor_start' : cursor_pos - len(txt), 'metadata' : {}, 'status' : 'ok'} async def do_debug_request(self, msg): if _is_debugpy_available: return await self.debugger.process_request(msg) def _experimental_do_complete(self, code, cursor_pos): """ Experimental completions from IPython, using Jedi. 
""" if cursor_pos is None: cursor_pos = len(code) with _provisionalcompleter(): raw_completions = self.shell.Completer.completions(code, cursor_pos) completions = list(_rectify_completions(code, raw_completions)) comps = [] for comp in completions: comps.append(dict( start=comp.start, end=comp.end, text=comp.text, type=comp.type, )) if completions: s = completions[0].start e = completions[0].end matches = [c.text for c in completions] else: s = cursor_pos e = cursor_pos matches = [] return {'matches': matches, 'cursor_end': e, 'cursor_start': s, 'metadata': {_EXPERIMENTAL_KEY_NAME: comps}, 'status': 'ok'} def do_inspect(self, code, cursor_pos, detail_level=0, omit_sections=()): name = token_at_cursor(code, cursor_pos) reply_content = {'status' : 'ok'} reply_content['data'] = {} reply_content['metadata'] = {} try: if release.version_info >= (8,): # `omit_sections` keyword will be available in IPython 8, see # https://github.com/ipython/ipython/pull/13343 bundle = self.shell.object_inspect_mime( name, detail_level=detail_level, omit_sections=omit_sections, ) else: bundle = self.shell.object_inspect_mime( name, detail_level=detail_level ) reply_content['data'].update(bundle) if not self.shell.enable_html_pager: reply_content['data'].pop('text/html') reply_content['found'] = True except KeyError: reply_content['found'] = False return reply_content def do_history(self, hist_access_type, output, raw, session=0, start=0, stop=None, n=None, pattern=None, unique=False): if hist_access_type == 'tail': hist = self.shell.history_manager.get_tail(n, raw=raw, output=output, include_latest=True) elif hist_access_type == 'range': hist = self.shell.history_manager.get_range(session, start, stop, raw=raw, output=output) elif hist_access_type == 'search': hist = self.shell.history_manager.search( pattern, raw=raw, output=output, n=n, unique=unique) else: hist = [] return { 'status': 'ok', 'history' : list(hist), } def do_shutdown(self, restart): self.shell.exit_now = True return dict(status='ok', restart=restart) def do_is_complete(self, code): transformer_manager = getattr(self.shell, 'input_transformer_manager', None) if transformer_manager is None: # input_splitter attribute is deprecated transformer_manager = self.shell.input_splitter status, indent_spaces = transformer_manager.check_complete(code) r = {'status': status} if status == 'incomplete': r['indent'] = ' ' * indent_spaces return r def do_apply(self, content, bufs, msg_id, reply_metadata): from .serialize import serialize_object, unpack_apply_message shell = self.shell try: working = shell.user_ns prefix = "_"+str(msg_id).replace("-","")+"_" f,args,kwargs = unpack_apply_message(bufs, working, copy=False) fname = getattr(f, '__name__', 'f') fname = prefix+"f" argname = prefix+"args" kwargname = prefix+"kwargs" resultname = prefix+"result" ns = { fname : f, argname : args, kwargname : kwargs , resultname : None } # print ns working.update(ns) code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname) try: exec(code, shell.user_global_ns, shell.user_ns) result = working.get(resultname) finally: for key in ns: working.pop(key) result_buf = serialize_object(result, buffer_threshold=self.session.buffer_threshold, item_threshold=self.session.item_threshold, ) except BaseException as e: # invoke IPython traceback formatting shell.showtraceback() reply_content = { "traceback": shell._last_traceback or [], "ename": str(type(e).__name__), "evalue": str(e), } # FIXME: deprecated piece for ipyparallel (remove in 5.0): e_info = 
dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply') reply_content['engine_info'] = e_info self.send_response(self.iopub_socket, 'error', reply_content, ident=self._topic('error'), channel='shell') self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback'])) result_buf = [] reply_content['status'] = 'error' else: reply_content = {'status' : 'ok'} return reply_content, result_buf def do_clear(self): self.shell.reset(False) return dict(status='ok') # This exists only for backwards compatibility - use IPythonKernel instead class Kernel(IPythonKernel): def __init__(self, *args, **kwargs): import warnings warnings.warn('Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel', DeprecationWarning) super().__init__(*args, **kwargs) ipykernel-6.7.0/ipykernel/jsonutil.py000066400000000000000000000115101417004153500177470ustar00rootroot00000000000000"""Utilities to manipulate JSON objects.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from binascii import b2a_base64 import math import re import types from datetime import datetime import numbers from jupyter_client._version import version_info as jupyter_client_version next_attr_name = '__next__' #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- # timestamp formats ISO8601 = "%Y-%m-%dT%H:%M:%S.%f" ISO8601_PAT=re.compile(r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?Z?([\+\-]\d{2}:?\d{2})?$") # holy crap, strptime is not threadsafe. # Calling it once at import seems to help. datetime.strptime("1", "%d") #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- # constants for identifying png/jpeg data PNG = b'\x89PNG\r\n\x1a\n' # front of PNG base64-encoded PNG64 = b'iVBORw0KG' JPEG = b'\xff\xd8' # front of JPEG base64-encoded JPEG64 = b'/9' # constants for identifying gif data GIF_64 = b'R0lGODdh' GIF89_64 = b'R0lGODlh' # front of PDF base64-encoded PDF64 = b'JVBER' JUPYTER_CLIENT_MAJOR_VERSION = jupyter_client_version[0] def encode_images(format_dict): """b64-encodes images in a displaypub format dict Perhaps this should be handled in json_clean itself? Parameters ---------- format_dict : dict A dictionary of display data keyed by mime-type Returns ------- format_dict : dict A copy of the same dictionary, but binary image data ('image/png', 'image/jpeg' or 'application/pdf') is base64-encoded. """ # no need for handling of ambiguous bytestrings on Python 3, # where bytes objects always represent binary data and thus # base64-encoded. return format_dict def json_clean(obj): """Deprecated, this is a no-op for jupyter-client>=7. Clean an object to ensure it's safe to encode in JSON. Atomic, immutable objects are returned unmodified. Sets and tuples are converted to lists, lists are copied and dicts are also copied. Note: dicts whose keys could cause collisions upon encoding (such as a dict with both the number 1 and the string '1' as keys) will cause a ValueError to be raised. Parameters ---------- obj : any python object Returns ------- out : object A version of the input which will not cause an encoding error when encoded as JSON. Note that this function does not *encode* its inputs, it simply sanitizes it so that there will be no encoding errors later. 
""" if JUPYTER_CLIENT_MAJOR_VERSION >= 7: return obj # types that are 'atomic' and ok in json as-is. atomic_ok = (str, type(None)) # containers that we need to convert into lists container_to_list = (tuple, set, types.GeneratorType) # Since bools are a subtype of Integrals, which are a subtype of Reals, # we have to check them in that order. if isinstance(obj, bool): return obj if isinstance(obj, numbers.Integral): # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598) return int(obj) if isinstance(obj, numbers.Real): # cast out-of-range floats to their reprs if math.isnan(obj) or math.isinf(obj): return repr(obj) return float(obj) if isinstance(obj, atomic_ok): return obj if isinstance(obj, bytes): # unanmbiguous binary data is base64-encoded # (this probably should have happened upstream) return b2a_base64(obj).decode('ascii') if isinstance(obj, container_to_list) or ( hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)): obj = list(obj) if isinstance(obj, list): return [json_clean(x) for x in obj] if isinstance(obj, dict): # First, validate that the dict won't lose data in conversion due to # key collisions after stringification. This can happen with keys like # True and 'true' or 1 and '1', which collide in JSON. nkeys = len(obj) nkeys_collapsed = len(set(map(str, obj))) if nkeys != nkeys_collapsed: raise ValueError('dict cannot be safely converted to JSON: ' 'key collision would lead to dropped values') # If all OK, proceed by making the new dict that will be json-safe out = {} for k,v in obj.items(): out[str(k)] = json_clean(v) return out if isinstance(obj, datetime): return obj.strftime(ISO8601) # we don't understand it, it's probably an unserializable object raise ValueError("Can't clean for JSON: %r" % obj) ipykernel-6.7.0/ipykernel/kernelapp.py000066400000000000000000000654171417004153500201000ustar00rootroot00000000000000"""An Application for launching a kernel""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
import atexit import os import sys import errno import signal import traceback import logging from functools import partial from io import TextIOWrapper, FileIO from logging import StreamHandler from tornado import ioloop import zmq from zmq.eventloop.zmqstream import ZMQStream from IPython.core.application import ( BaseIPythonApplication, base_flags, base_aliases, catch_config_error ) from IPython.core.profiledir import ProfileDir from IPython.core.shellapp import ( InteractiveShellApp, shell_flags, shell_aliases ) from traitlets import ( Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, default ) from traitlets.utils.importstring import import_item from traitlets.utils import filefind from jupyter_core.paths import jupyter_runtime_dir from jupyter_client import write_connection_file from jupyter_client.connect import ConnectionFileMixin # local imports from .iostream import IOPubThread from .control import ControlThread from .heartbeat import Heartbeat from .ipkernel import IPythonKernel from .parentpoller import ParentPollerUnix, ParentPollerWindows from jupyter_client.session import ( Session, session_flags, session_aliases, ) from .zmqshell import ZMQInteractiveShell #----------------------------------------------------------------------------- # Flags and Aliases #----------------------------------------------------------------------------- kernel_aliases = dict(base_aliases) kernel_aliases.update({ 'ip' : 'IPKernelApp.ip', 'hb' : 'IPKernelApp.hb_port', 'shell' : 'IPKernelApp.shell_port', 'iopub' : 'IPKernelApp.iopub_port', 'stdin' : 'IPKernelApp.stdin_port', 'control' : 'IPKernelApp.control_port', 'f' : 'IPKernelApp.connection_file', 'transport': 'IPKernelApp.transport', }) kernel_flags = dict(base_flags) kernel_flags.update({ 'no-stdout' : ( {'IPKernelApp' : {'no_stdout' : True}}, "redirect stdout to the null device"), 'no-stderr' : ( {'IPKernelApp' : {'no_stderr' : True}}, "redirect stderr to the null device"), 'pylab' : ( {'IPKernelApp' : {'pylab' : 'auto'}}, """Pre-load matplotlib and numpy for interactive use with the default matplotlib backend."""), 'trio-loop' : ( {'InteractiveShell' : {'trio_loop' : False}}, 'Enable Trio as main event loop.' ), }) # inherit flags&aliases for any IPython shell apps kernel_aliases.update(shell_aliases) kernel_flags.update(shell_flags) # inherit flags&aliases for Sessions kernel_aliases.update(session_aliases) kernel_flags.update(session_flags) _ctrl_c_message = """\ NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work. To exit, you will have to explicitly quit this process, by either sending "quit" from a client, or using Ctrl-\\ in UNIX-like environments. To read more about this, see https://github.com/ipython/ipython/issues/2049 """ #----------------------------------------------------------------------------- # Application class for starting an IPython Kernel #----------------------------------------------------------------------------- class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMixin): name='ipython-kernel' aliases = Dict(kernel_aliases) flags = Dict(kernel_flags) classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session] # the kernel class, as an importstring kernel_class = Type('ipykernel.ipkernel.IPythonKernel', klass='ipykernel.kernelbase.Kernel', help="""The Kernel subclass to be used. This should allow easy re-use of the IPKernelApp entry point to configure and launch kernels other than IPython's own. 
""").tag(config=True) kernel = Any() poller = Any() # don't restrict this even though current pollers are all Threads heartbeat = Instance(Heartbeat, allow_none=True) context = Any() shell_socket = Any() control_socket = Any() debugpy_socket = Any() debug_shell_socket = Any() stdin_socket = Any() iopub_socket = Any() iopub_thread = Any() control_thread = Any() _ports = Dict() subcommands = { 'install': ( 'ipykernel.kernelspec.InstallIPythonKernelSpecApp', 'Install the IPython kernel' ), } # connection info: connection_dir = Unicode() @default('connection_dir') def _default_connection_dir(self): return jupyter_runtime_dir() @property def abs_connection_file(self): if os.path.basename(self.connection_file) == self.connection_file: return os.path.join(self.connection_dir, self.connection_file) else: return self.connection_file # streams, etc. no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True) no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True) trio_loop = Bool(False, help="Set main event loop.").tag(config=True) quiet = Bool(True, help="Only send stdout/stderr to output stream").tag(config=True) outstream_class = DottedObjectName('ipykernel.iostream.OutStream', help="The importstring for the OutStream factory").tag(config=True) displayhook_class = DottedObjectName('ipykernel.displayhook.ZMQDisplayHook', help="The importstring for the DisplayHook factory").tag(config=True) capture_fd_output = Bool( True, help="""Attempt to capture and forward low-level output, e.g. produced by Extension libraries. """, ).tag(config=True) # polling parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0), help="""kill this process if its parent dies. On Windows, the argument specifies the HANDLE of the parent process, otherwise it is simply boolean. """).tag(config=True) interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0), help="""ONLY USED ON WINDOWS Interrupt this process when the parent is signaled. """).tag(config=True) def init_crash_handler(self): sys.excepthook = self.excepthook def excepthook(self, etype, evalue, tb): # write uncaught traceback to 'real' stderr, not zmq-forwarder traceback.print_exception(etype, evalue, tb, file=sys.__stderr__) def init_poller(self): if sys.platform == 'win32': if self.interrupt or self.parent_handle: self.poller = ParentPollerWindows(self.interrupt, self.parent_handle) elif self.parent_handle and self.parent_handle != 1: # PID 1 (init) is special and will never go away, # only be reassigned. # Parent polling doesn't work if ppid == 1 to start with. 
self.poller = ParentPollerUnix() def _try_bind_socket(self, s, port): iface = '%s://%s' % (self.transport, self.ip) if self.transport == 'tcp': if port <= 0: port = s.bind_to_random_port(iface) else: s.bind("tcp://%s:%i" % (self.ip, port)) elif self.transport == 'ipc': if port <= 0: port = 1 path = "%s-%i" % (self.ip, port) while os.path.exists(path): port = port + 1 path = "%s-%i" % (self.ip, port) else: path = "%s-%i" % (self.ip, port) s.bind("ipc://%s" % path) return port def _bind_socket(self, s, port): try: win_in_use = errno.WSAEADDRINUSE except AttributeError: win_in_use = None # Try up to 100 times to bind a port when in conflict to avoid # infinite attempts in bad setups max_attempts = 1 if port else 100 for attempt in range(max_attempts): try: return self._try_bind_socket(s, port) except zmq.ZMQError as ze: # Raise if we have any error not related to socket binding if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use: raise if attempt == max_attempts - 1: raise def write_connection_file(self): """write connection info to JSON file""" cf = self.abs_connection_file self.log.debug("Writing connection file: %s", cf) write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport, shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port, iopub_port=self.iopub_port, control_port=self.control_port) def cleanup_connection_file(self): cf = self.abs_connection_file self.log.debug("Cleaning up connection file: %s", cf) try: os.remove(cf) except OSError: pass self.cleanup_ipc_files() def init_connection_file(self): if not self.connection_file: self.connection_file = "kernel-%s.json"%os.getpid() try: self.connection_file = filefind(self.connection_file, ['.', self.connection_dir]) except OSError: self.log.debug("Connection file not found: %s", self.connection_file) # This means I own it, and I'll create it in this directory: os.makedirs(os.path.dirname(self.abs_connection_file), mode=0o700, exist_ok=True) # Also, I will clean it up: atexit.register(self.cleanup_connection_file) return try: self.load_connection_file() except Exception: self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) self.exit(1) def init_sockets(self): # Create a context, a session, and the kernel sockets. self.log.info("Starting the kernel at pid: %i", os.getpid()) assert self.context is None, "init_sockets cannot be called twice!" 
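        # (_bind_socket above retries up to 100 times only when no explicit
        # port was requested; an isolated usage sketch, assuming a throwaway
        # ROUTER socket on the tcp transport:
        #
        #     s = context.socket(zmq.ROUTER)
        #     port = self._bind_socket(s, 0)   # 0 => bind_to_random_port
        # )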
self.context = context = zmq.Context() atexit.register(self.close) self.shell_socket = context.socket(zmq.ROUTER) self.shell_socket.linger = 1000 self.shell_port = self._bind_socket(self.shell_socket, self.shell_port) self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port) self.stdin_socket = context.socket(zmq.ROUTER) self.stdin_socket.linger = 1000 self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port) self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port) if hasattr(zmq, 'ROUTER_HANDOVER'): # set router-handover to workaround zeromq reconnect problems # in certain rare circumstances # see ipython/ipykernel#270 and zeromq/libzmq#2892 self.shell_socket.router_handover = \ self.stdin_socket.router_handover = 1 self.init_control(context) self.init_iopub(context) def init_control(self, context): self.control_socket = context.socket(zmq.ROUTER) self.control_socket.linger = 1000 self.control_port = self._bind_socket(self.control_socket, self.control_port) self.log.debug("control ROUTER Channel on port: %i" % self.control_port) self.debugpy_socket = context.socket(zmq.STREAM) self.debugpy_socket.linger = 1000 self.debug_shell_socket = context.socket(zmq.DEALER) self.debug_shell_socket.linger = 1000 if self.shell_socket.getsockopt(zmq.LAST_ENDPOINT): self.debug_shell_socket.connect(self.shell_socket.getsockopt(zmq.LAST_ENDPOINT)) if hasattr(zmq, 'ROUTER_HANDOVER'): # set router-handover to workaround zeromq reconnect problems # in certain rare circumstances # see ipython/ipykernel#270 and zeromq/libzmq#2892 self.control_socket.router_handover = 1 self.control_thread = ControlThread(daemon=True) def init_iopub(self, context): self.iopub_socket = context.socket(zmq.PUB) self.iopub_socket.linger = 1000 self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port) self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port) self.configure_tornado_logger() self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True) self.iopub_thread.start() # backward-compat: wrap iopub socket API in background thread self.iopub_socket = self.iopub_thread.background_socket def init_heartbeat(self): """start the heart beating""" # heartbeat doesn't share context, because it mustn't be blocked # by the GIL, which is accessed by libzmq when freeing zero-copy messages hb_ctx = zmq.Context() self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port)) self.hb_port = self.heartbeat.port self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port) self.heartbeat.start() def close(self): """Close zmq sockets in an orderly fashion""" # un-capture IO before we start closing channels self.reset_io() self.log.info("Cleaning up sockets") if self.heartbeat: self.log.debug("Closing heartbeat channel") self.heartbeat.context.term() if self.iopub_thread: self.log.debug("Closing iopub channel") self.iopub_thread.stop() self.iopub_thread.close() if self.control_thread and self.control_thread.is_alive(): self.log.debug("Closing control thread") self.control_thread.stop() self.control_thread.join() if self.debugpy_socket and not self.debugpy_socket.closed: self.debugpy_socket.close() if self.debug_shell_socket and not self.debug_shell_socket.closed: self.debug_shell_socket.close() for channel in ('shell', 'control', 'stdin'): self.log.debug("Closing %s channel", channel) socket = getattr(self, channel + "_socket", None) if socket and not socket.closed: socket.close() self.log.debug("Terminating zmq context") self.context.term() self.log.debug("Terminated 
zmq context") def log_connection_info(self): """display connection info, and store ports""" basename = os.path.basename(self.connection_file) if basename == self.connection_file or \ os.path.dirname(self.connection_file) == self.connection_dir: # use shortname tail = basename else: tail = self.connection_file lines = [ "To connect another client to this kernel, use:", " --existing %s" % tail, ] # log connection info # info-level, so often not shown. # frontends should use the %connect_info magic # to see the connection info for line in lines: self.log.info(line) # also raw print to the terminal if no parent_handle (`ipython kernel`) # unless log-level is CRITICAL (--quiet) if not self.parent_handle and self.log_level < logging.CRITICAL: print(_ctrl_c_message, file=sys.__stdout__) for line in lines: print(line, file=sys.__stdout__) self._ports = dict(shell=self.shell_port, iopub=self.iopub_port, stdin=self.stdin_port, hb=self.hb_port, control=self.control_port) def init_blackhole(self): """redirects stdout/stderr to devnull if necessary""" if self.no_stdout or self.no_stderr: blackhole = open(os.devnull, 'w') if self.no_stdout: sys.stdout = sys.__stdout__ = blackhole if self.no_stderr: sys.stderr = sys.__stderr__ = blackhole def init_io(self): """Redirect input streams and set a display hook.""" if self.outstream_class: outstream_factory = import_item(str(self.outstream_class)) if sys.stdout is not None: sys.stdout.flush() e_stdout = None if self.quiet else sys.__stdout__ e_stderr = None if self.quiet else sys.__stderr__ if not self.capture_fd_output: outstream_factory = partial(outstream_factory, watchfd=False) sys.stdout = outstream_factory(self.session, self.iopub_thread, 'stdout', echo=e_stdout) if sys.stderr is not None: sys.stderr.flush() sys.stderr = outstream_factory( self.session, self.iopub_thread, "stderr", echo=e_stderr ) if hasattr(sys.stderr, "_original_stdstream_copy"): for handler in self.log.handlers: if isinstance(handler, StreamHandler) and ( handler.stream.buffer.fileno() == 2 ): self.log.debug( "Seeing logger to stderr, rerouting to raw filedescriptor." 
) handler.stream = TextIOWrapper( FileIO(sys.stderr._original_stdstream_copy, "w") ) if self.displayhook_class: displayhook_factory = import_item(str(self.displayhook_class)) self.displayhook = displayhook_factory(self.session, self.iopub_socket) sys.displayhook = self.displayhook self.patch_io() def reset_io(self): """restore original io restores state after init_io """ sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.displayhook = sys.__displayhook__ def patch_io(self): """Patch important libraries that can't handle sys.stdout forwarding""" try: import faulthandler except ImportError: pass else: # Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible # updates to the upstream API and update accordingly (up-to-date as of Python 3.5): # https://docs.python.org/3/library/faulthandler.html#faulthandler.enable # change default file to __stderr__ from forwarded stderr faulthandler_enable = faulthandler.enable def enable(file=sys.__stderr__, all_threads=True, **kwargs): return faulthandler_enable(file=file, all_threads=all_threads, **kwargs) faulthandler.enable = enable if hasattr(faulthandler, 'register'): faulthandler_register = faulthandler.register def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs): return faulthandler_register(signum, file=file, all_threads=all_threads, chain=chain, **kwargs) faulthandler.register = register def init_signal(self): signal.signal(signal.SIGINT, signal.SIG_IGN) def init_kernel(self): """Create the Kernel object itself""" shell_stream = ZMQStream(self.shell_socket) control_stream = ZMQStream(self.control_socket, self.control_thread.io_loop) debugpy_stream = ZMQStream(self.debugpy_socket, self.control_thread.io_loop) self.control_thread.start() kernel_factory = self.kernel_class.instance kernel = kernel_factory(parent=self, session=self.session, control_stream=control_stream, debugpy_stream=debugpy_stream, debug_shell_socket=self.debug_shell_socket, shell_stream=shell_stream, control_thread=self.control_thread, iopub_thread=self.iopub_thread, iopub_socket=self.iopub_socket, stdin_socket=self.stdin_socket, log=self.log, profile_dir=self.profile_dir, user_ns=self.user_ns, ) kernel.record_ports({ name + '_port': port for name, port in self._ports.items() }) self.kernel = kernel # Allow the displayhook to get the execution count self.displayhook.get_execution_count = lambda: kernel.execution_count def init_gui_pylab(self): """Enable GUI event loop integration, taking pylab into account.""" # Register inline backend as default # this is higher priority than matplotlibrc, # but lower priority than anything else (mpl.use() for instance). # This only affects matplotlib >= 1.5 if not os.environ.get('MPLBACKEND'): os.environ['MPLBACKEND'] = 'module://matplotlib_inline.backend_inline' # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab` # to ensure that any exception is printed straight to stderr. # Normally _showtraceback associates the reply with an execution, # which means frontends will never draw it, as this exception # is not associated with any execute request. 
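        # The net effect is that a failure surfaces as plain text on stderr,
        # e.g. (illustrative output):
        #
        #     GUI event loop or pylab initialization failed
        #     ImportError: No module named 'PyQt5'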
shell = self.shell _showtraceback = shell._showtraceback try: # replace error-sending traceback with stderr def print_tb(etype, evalue, stb): print ("GUI event loop or pylab initialization failed", file=sys.stderr) print (shell.InteractiveTB.stb2text(stb), file=sys.stderr) shell._showtraceback = print_tb InteractiveShellApp.init_gui_pylab(self) finally: shell._showtraceback = _showtraceback def init_shell(self): self.shell = getattr(self.kernel, 'shell', None) if self.shell: self.shell.configurables.append(self) def configure_tornado_logger(self): """ Configure the tornado logging.Logger. Must set up the tornado logger or else tornado will call basicConfig for the root logger which makes the root logger go to the real sys.stderr instead of the capture streams. This function mimics the setup of logging.basicConfig. """ logger = logging.getLogger('tornado') handler = logging.StreamHandler() formatter = logging.Formatter(logging.BASIC_FORMAT) handler.setFormatter(formatter) logger.addHandler(handler) def _init_asyncio_patch(self): """set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. Support for Proactor via a background thread is available in tornado 6.1, but it is still preferable to run the Selector in the main thread instead of the background. do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio without threads, remove and bump tornado requirement for py38. Most likely, this will mean a new Python version where asyncio.ProactorEventLoop supports add_reader and friends. """ if sys.platform.startswith("win") and sys.version_info >= (3, 8): import asyncio try: from asyncio import ( WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) def init_pdb(self): """Replace pdb with IPython's version that is interruptible. With the non-interruptible version, stopping pdb() locks up the kernel in a non-recoverable state. """ import pdb from IPython.core import debugger if hasattr(debugger, "InterruptiblePdb"): # Only available in newer IPython releases: debugger.Pdb = debugger.InterruptiblePdb pdb.Pdb = debugger.Pdb pdb.set_trace = debugger.set_trace @catch_config_error def initialize(self, argv=None): self._init_asyncio_patch() super().initialize(argv) if self.subapp is not None: return self.init_pdb() self.init_blackhole() self.init_connection_file() self.init_poller() self.init_sockets() self.init_heartbeat() # writing/displaying connection info must be *after* init_sockets/heartbeat self.write_connection_file() # Log connection info after writing connection file, so that the connection # file is definitely available at the time someone reads the log. 
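        # (The connection file is a small JSON document; an illustrative one,
        # with fabricated ports and key:
        #
        #     {"shell_port": 53794, "iopub_port": 53795, "stdin_port": 53796,
        #      "control_port": 53797, "hb_port": 53798, "ip": "127.0.0.1",
        #      "key": "a0436f6c-...", "transport": "tcp",
        #      "signature_scheme": "hmac-sha256"}
        # )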
self.log_connection_info() self.init_io() try: self.init_signal() except Exception: # Catch exception when initializing signal fails, eg when running the # kernel on a separate thread if self.log_level < logging.CRITICAL: self.log.error("Unable to initialize signal:", exc_info=True) self.init_kernel() # shell init steps self.init_path() self.init_shell() if self.shell: self.init_gui_pylab() self.init_extensions() self.init_code() # flush stdout/stderr, so that anything written to these streams during # initialization do not get associated with the first execution request sys.stdout.flush() sys.stderr.flush() def start(self): if self.subapp is not None: return self.subapp.start() if self.poller is not None: self.poller.start() self.kernel.start() self.io_loop = ioloop.IOLoop.current() if self.trio_loop: from ipykernel.trio_runner import TrioRunner tr = TrioRunner() tr.initialize(self.kernel, self.io_loop) try: tr.run() except KeyboardInterrupt: pass else: try: self.io_loop.start() except KeyboardInterrupt: pass launch_new_instance = IPKernelApp.launch_instance def main(): """Run an IPKernel as an application""" app = IPKernelApp.instance() app.initialize() app.start() if __name__ == '__main__': main() ipykernel-6.7.0/ipykernel/kernelbase.py000066400000000000000000001161151417004153500202220ustar00rootroot00000000000000"""Base class for a kernel that talks to frontends over 0MQ.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import asyncio import concurrent.futures from datetime import datetime from functools import partial import itertools import logging import inspect import os from signal import signal, default_int_handler, SIGINT import sys import time import uuid import warnings try: import psutil except ImportError: psutil = None try: # jupyter_client >= 5, use tz-aware now from jupyter_client.session import utcnow as now except ImportError: # jupyter_client < 5, use local now() now = datetime.now from tornado import ioloop from tornado.queues import Queue, QueueEmpty import zmq from zmq.eventloop.zmqstream import ZMQStream from traitlets.config.configurable import SingletonConfigurable from IPython.core.error import StdinNotImplementedError from ipykernel.jsonutil import json_clean from traitlets import ( Any, Instance, Float, Dict, List, Set, Integer, Unicode, Bool, observe, default ) from jupyter_client.session import Session from ._version import kernel_protocol_version class Kernel(SingletonConfigurable): #--------------------------------------------------------------------------- # Kernel interface #--------------------------------------------------------------------------- # attribute to override with a GUI eventloop = Any(None) @observe('eventloop') def _update_eventloop(self, change): """schedule call to eventloop from IOLoop""" loop = ioloop.IOLoop.current() if change.new is not None: loop.add_callback(self.enter_eventloop) session = Instance(Session, allow_none=True) profile_dir = Instance('IPython.core.profiledir.ProfileDir', allow_none=True) shell_stream = Instance(ZMQStream, allow_none=True) shell_streams = List( help="""Deprecated shell_streams alias. Use shell_stream .. versionchanged:: 6.0 shell_streams is deprecated. Use shell_stream. """ ) @default("shell_streams") def _shell_streams_default(self): warnings.warn( "Kernel.shell_streams is deprecated in ipykernel 6.0. 
Use Kernel.shell_stream", DeprecationWarning, stacklevel=2, ) if self.shell_stream is not None: return [self.shell_stream] else: return [] @observe("shell_streams") def _shell_streams_changed(self, change): warnings.warn( "Kernel.shell_streams is deprecated in ipykernel 6.0. Use Kernel.shell_stream", DeprecationWarning, stacklevel=2, ) if len(change.new) > 1: warnings.warn( "Kernel only supports one shell stream. Additional streams will be ignored.", RuntimeWarning, stacklevel=2, ) if change.new: self.shell_stream = change.new[0] control_stream = Instance(ZMQStream, allow_none=True) debug_shell_socket = Any() control_thread = Any() iopub_socket = Any() iopub_thread = Any() stdin_socket = Any() log = Instance(logging.Logger, allow_none=True) # identities: int_id = Integer(-1) ident = Unicode() @default('ident') def _default_ident(self): return str(uuid.uuid4()) # This should be overridden by wrapper kernels that implement any real # language. language_info = {} # any links that should go in the help menu help_links = List() # Private interface _darwin_app_nap = Bool(True, help="""Whether to use appnope for compatibility with OS X App Nap. Only affects OS X >= 10.9. """ ).tag(config=True) # track associations with current request _allow_stdin = Bool(False) _parents = Dict({"shell": {}, "control": {}}) _parent_ident = Dict({'shell': b'', 'control': b''}) @property def _parent_header(self): warnings.warn( "Kernel._parent_header is deprecated in ipykernel 6. Use .get_parent()", DeprecationWarning, stacklevel=2, ) return self.get_parent(channel="shell") # Time to sleep after flushing the stdout/err buffers in each execute # cycle. While this introduces a hard limit on the minimal latency of the # execute cycle, it helps prevent output synchronization problems for # clients. # Units are in seconds. The minimum zmq latency on local host is probably # ~150 microseconds, set this to 500us for now. We may need to increase it # a little if it's not enough after more interactive testing. _execute_sleep = Float(0.0005).tag(config=True) # Frequency of the kernel's event loop. # Units are in seconds, kernel subclasses for GUI toolkits may need to # adapt to milliseconds. _poll_interval = Float(0.01).tag(config=True) stop_on_error_timeout = Float( 0.0, config=True, help="""time (in seconds) to wait for messages to arrive when aborting queued requests after an error. Requests that arrive within this window after an error will be cancelled. Increase in the event of unusually slow network causing significant delays, which can manifest as e.g. "Run all" in a notebook aborting some, but not all, messages after an error. """ ) # If the shutdown was requested over the network, we leave here the # necessary reply message so it can be sent by our registered atexit # handler. This ensures that the reply is only sent to clients truly at # the end of our shutdown process (which happens after the underlying # IPython shell's own shutdown). _shutdown_message = None # This is a dict of port number that the kernel is listening on. It is set # by record_ports and used by connect_request. _recorded_ports = Dict() # set of aborted msg_ids aborted = Set() # Track execution count here. For IPython, we override this to use the # execution count we store in the shell. 
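    # (IPythonKernel overrides this with a property that delegates to the
    # shell; a sketch of that override, assuming a shell object with its own
    # counter:
    #
    #     @property
    #     def execution_count(self):
    #         return self.shell.execution_count
    # )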
execution_count = 0 msg_types = [ 'execute_request', 'complete_request', 'inspect_request', 'history_request', 'comm_info_request', 'kernel_info_request', 'connect_request', 'shutdown_request', 'is_complete_request', 'interrupt_request', # deprecated: 'apply_request', ] # add deprecated ipyparallel control messages control_msg_types = msg_types + ['clear_request', 'abort_request', 'debug_request', 'usage_request'] def __init__(self, **kwargs): super().__init__(**kwargs) # Build dict of handlers for message types self.shell_handlers = {} for msg_type in self.msg_types: self.shell_handlers[msg_type] = getattr(self, msg_type) self.control_handlers = {} for msg_type in self.control_msg_types: self.control_handlers[msg_type] = getattr(self, msg_type) self.control_queue = Queue() def dispatch_control(self, msg): self.control_queue.put_nowait(msg) async def poll_control_queue(self): while True: msg = await self.control_queue.get() # handle tracers from _flush_control_queue if isinstance(msg, (concurrent.futures.Future, asyncio.Future)): msg.set_result(None) continue await self.process_control(msg) async def _flush_control_queue(self): """Flush the control queue, wait for processing of any pending messages""" if self.control_thread: control_loop = self.control_thread.io_loop # concurrent.futures.Futures are threadsafe # and can be used to await across threads tracer_future = concurrent.futures.Future() awaitable_future = asyncio.wrap_future(tracer_future) else: control_loop = self.io_loop tracer_future = awaitable_future = asyncio.Future() def _flush(): # control_stream.flush puts messages on the queue self.control_stream.flush() # put Future on the queue after all of those, # so we can wait for all queued messages to be processed self.control_queue.put(tracer_future) control_loop.add_callback(_flush) return awaitable_future async def process_control(self, msg): """dispatch control requests""" idents, msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.deserialize(msg, content=True, copy=False) except Exception: self.log.error("Invalid Control Message", exc_info=True) return self.log.debug("Control received: %s", msg) # Set the parent message for side effects. self.set_parent(idents, msg, channel='control') self._publish_status('busy', 'control') header = msg['header'] msg_type = header['msg_type'] handler = self.control_handlers.get(msg_type, None) if handler is None: self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type) else: try: result = handler(self.control_stream, idents, msg) if inspect.isawaitable(result): await result except Exception: self.log.error("Exception in control handler:", exc_info=True) sys.stdout.flush() sys.stderr.flush() self._publish_status('idle', 'control') # flush to ensure reply is sent self.control_stream.flush(zmq.POLLOUT) def should_handle(self, stream, msg, idents): """Check whether a shell-channel message should be handled Allows subclasses to prevent handling of certain messages (e.g. aborted requests). """ msg_id = msg['header']['msg_id'] if msg_id in self.aborted: # is it safe to assume a msg_id will not be resubmitted? 
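            # A message is aborted at most once: drop the id here so that a
            # hypothetical resubmission of the same msg_id would be handled
            # normally rather than aborted again.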
self.aborted.remove(msg_id) self._send_abort_reply(stream, msg, idents) return False return True async def dispatch_shell(self, msg): """dispatch shell requests""" # flush control queue before handling shell requests await self._flush_control_queue() idents, msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.deserialize(msg, content=True, copy=False) except Exception: self.log.error("Invalid Message", exc_info=True) return # Set the parent message for side effects. self.set_parent(idents, msg, channel='shell') self._publish_status('busy', 'shell') msg_type = msg['header']['msg_type'] # Only abort execute requests if self._aborting and msg_type == 'execute_request': self._send_abort_reply(self.shell_stream, msg, idents) self._publish_status('idle', 'shell') # flush to ensure reply is sent before # handling the next request self.shell_stream.flush(zmq.POLLOUT) return # Print some info about this message and leave a '--->' marker, so it's # easier to trace visually the message chain when debugging. Each # handler prints its message at the end. self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type) self.log.debug(' Content: %s\n --->\n ', msg['content']) if not self.should_handle(self.shell_stream, msg, idents): return handler = self.shell_handlers.get(msg_type, None) if handler is None: self.log.warning("Unknown message type: %r", msg_type) else: self.log.debug("%s: %s", msg_type, msg) try: self.pre_handler_hook() except Exception: self.log.debug("Unable to signal in pre_handler_hook:", exc_info=True) try: result = handler(self.shell_stream, idents, msg) if inspect.isawaitable(result): await result except Exception: self.log.error("Exception in message handler:", exc_info=True) except KeyboardInterrupt: # Ctrl-c shouldn't crash the kernel here. 
self.log.error("KeyboardInterrupt caught in kernel.") finally: try: self.post_handler_hook() except Exception: self.log.debug("Unable to signal in post_handler_hook:", exc_info=True) sys.stdout.flush() sys.stderr.flush() self._publish_status('idle', 'shell') # flush to ensure reply is sent before # handling the next request self.shell_stream.flush(zmq.POLLOUT) def pre_handler_hook(self): """Hook to execute before calling message handler""" # ensure default_int_handler during handler call self.saved_sigint_handler = signal(SIGINT, default_int_handler) def post_handler_hook(self): """Hook to execute after calling message handler""" signal(SIGINT, self.saved_sigint_handler) def enter_eventloop(self): """enter eventloop""" self.log.info("Entering eventloop %s", self.eventloop) # record handle, so we can check when this changes eventloop = self.eventloop if eventloop is None: self.log.info("Exiting as there is no eventloop") return def advance_eventloop(): # check if eventloop changed: if self.eventloop is not eventloop: self.log.info("exiting eventloop %s", eventloop) return if self.msg_queue.qsize(): self.log.debug("Delaying eventloop due to waiting messages") # still messages to process, make the eventloop wait schedule_next() return self.log.debug("Advancing eventloop %s", eventloop) try: eventloop(self) except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel self.log.error("KeyboardInterrupt caught in kernel") pass if self.eventloop is eventloop: # schedule advance again schedule_next() def schedule_next(): """Schedule the next advance of the eventloop""" # flush the eventloop every so often, # giving us a chance to handle messages in the meantime self.log.debug("Scheduling eventloop advance") self.io_loop.call_later(0.001, advance_eventloop) # begin polling the eventloop schedule_next() async def do_one_iteration(self): """Process a single shell message Any pending control messages will be flushed as well .. versionchanged:: 5 This is now a coroutine """ # flush messages off of shell stream into the message queue self.shell_stream.flush() # process at most one shell message per iteration await self.process_one(wait=False) async def process_one(self, wait=True): """Process one request Returns None if no message was handled. 
""" if wait: t, dispatch, args = await self.msg_queue.get() else: try: t, dispatch, args = self.msg_queue.get_nowait() except (asyncio.QueueEmpty, QueueEmpty): return None await dispatch(*args) async def dispatch_queue(self): """Coroutine to preserve order of message handling Ensures that only one message is processing at a time, even when the handler is async """ while True: try: await self.process_one() except Exception: self.log.exception("Error in message handler") _message_counter = Any( help="""Monotonic counter of messages """, ) @default('_message_counter') def _message_counter_default(self): return itertools.count() def schedule_dispatch(self, dispatch, *args): """schedule a message for dispatch""" idx = next(self._message_counter) self.msg_queue.put_nowait( ( idx, dispatch, args, ) ) # ensure the eventloop wakes up self.io_loop.add_callback(lambda: None) def start(self): """register dispatchers for streams""" self.io_loop = ioloop.IOLoop.current() self.msg_queue = Queue() self.io_loop.add_callback(self.dispatch_queue) self.control_stream.on_recv(self.dispatch_control, copy=False) if self.control_thread: control_loop = self.control_thread.io_loop else: control_loop = self.io_loop asyncio.run_coroutine_threadsafe(self.poll_control_queue(), control_loop.asyncio_loop) self.shell_stream.on_recv( partial( self.schedule_dispatch, self.dispatch_shell, ), copy=False, ) # publish idle status self._publish_status('starting', 'shell') def record_ports(self, ports): """Record the ports that this kernel is using. The creator of the Kernel instance must call this methods if they want the :meth:`connect_request` method to return the port numbers. """ self._recorded_ports = ports #--------------------------------------------------------------------------- # Kernel request handlers #--------------------------------------------------------------------------- def _publish_execute_input(self, code, parent, execution_count): """Publish the code request on the iopub stream.""" self.session.send(self.iopub_socket, 'execute_input', {'code':code, 'execution_count': execution_count}, parent=parent, ident=self._topic('execute_input') ) def _publish_status(self, status, channel, parent=None): """send status (busy/idle) on IOPub""" self.session.send( self.iopub_socket, "status", {"execution_state": status}, parent=parent or self.get_parent(channel), ident=self._topic("status"), ) def _publish_debug_event(self, event): self.session.send( self.iopub_socket, "debug_event", event, parent=self.get_parent("control"), ident=self._topic("debug_event"), ) def set_parent(self, ident, parent, channel='shell'): """Set the current parent request Side effects (IOPub messages) and replies are associated with the request that caused them via the parent_header. The parent identity is used to route input_request messages on the stdin channel. """ self._parent_ident[channel] = ident self._parents[channel] = parent def get_parent(self, channel="shell"): """Get the parent request associated with a channel. .. versionadded:: 6 Parameters ---------- channel : str the name of the channel ('shell' or 'control') Returns ------- message : dict the parent message for the most recent request on the channel. """ return self._parents.get(channel, {}) def send_response(self, stream, msg_or_type, content=None, ident=None, buffers=None, track=False, header=None, metadata=None, channel='shell'): """Send a response to the message we're currently processing. 
This accepts all the parameters of :meth:`jupyter_client.session.Session.send` except ``parent``. This relies on :meth:`set_parent` having been called for the current message. """ return self.session.send( stream, msg_or_type, content, self.get_parent(channel), ident, buffers, track, header, metadata, ) def init_metadata(self, parent): """Initialize metadata. Run at the beginning of execution requests. """ # FIXME: `started` is part of ipyparallel # Remove for ipykernel 5.0 return { 'started': now(), } def finish_metadata(self, parent, metadata, reply_content): """Finish populating metadata. Run after completing an execution request. """ return metadata async def execute_request(self, stream, ident, parent): """handle an execute_request""" try: content = parent['content'] code = content['code'] silent = content['silent'] store_history = content.get('store_history', not silent) user_expressions = content.get('user_expressions', {}) allow_stdin = content.get('allow_stdin', False) except Exception: self.log.error("Got bad msg: ") self.log.error("%s", parent) return stop_on_error = content.get('stop_on_error', True) metadata = self.init_metadata(parent) # Re-broadcast our input for the benefit of listening clients, and # start computing output if not silent: self.execution_count += 1 self._publish_execute_input(code, parent, self.execution_count) reply_content = self.do_execute( code, silent, store_history, user_expressions, allow_stdin, ) if inspect.isawaitable(reply_content): reply_content = await reply_content # Flush output before sending the reply. sys.stdout.flush() sys.stderr.flush() # FIXME: on rare occasions, the flush doesn't seem to make it to the # clients... This seems to mitigate the problem, but we definitely need # to better understand what's going on. if self._execute_sleep: time.sleep(self._execute_sleep) # Send the reply. reply_content = json_clean(reply_content) metadata = self.finish_metadata(parent, metadata, reply_content) reply_msg = self.session.send(stream, 'execute_reply', reply_content, parent, metadata=metadata, ident=ident) self.log.debug("%s", reply_msg) if not silent and reply_msg['content']['status'] == 'error' and stop_on_error: await self._abort_queues() def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): """Execute user code. Must be overridden by subclasses. """ raise NotImplementedError async def complete_request(self, stream, ident, parent): content = parent['content'] code = content['code'] cursor_pos = content['cursor_pos'] matches = self.do_complete(code, cursor_pos) if inspect.isawaitable(matches): matches = await matches matches = json_clean(matches) self.session.send(stream, "complete_reply", matches, parent, ident) def do_complete(self, code, cursor_pos): """Override in subclasses to find completions. 
""" return {'matches' : [], 'cursor_end' : cursor_pos, 'cursor_start' : cursor_pos, 'metadata' : {}, 'status' : 'ok'} async def inspect_request(self, stream, ident, parent): content = parent['content'] reply_content = self.do_inspect( content['code'], content['cursor_pos'], content.get('detail_level', 0), set(content.get('omit_sections', [])), ) if inspect.isawaitable(reply_content): reply_content = await reply_content # Before we send this object over, we scrub it for JSON usage reply_content = json_clean(reply_content) msg = self.session.send(stream, 'inspect_reply', reply_content, parent, ident) self.log.debug("%s", msg) def do_inspect(self, code, cursor_pos, detail_level=0, omit_sections=()): """Override in subclasses to allow introspection. """ return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': False} async def history_request(self, stream, ident, parent): content = parent['content'] reply_content = self.do_history(**content) if inspect.isawaitable(reply_content): reply_content = await reply_content reply_content = json_clean(reply_content) msg = self.session.send(stream, 'history_reply', reply_content, parent, ident) self.log.debug("%s", msg) def do_history(self, hist_access_type, output, raw, session=None, start=None, stop=None, n=None, pattern=None, unique=False): """Override in subclasses to access history. """ return {'status': 'ok', 'history': []} async def connect_request(self, stream, ident, parent): if self._recorded_ports is not None: content = self._recorded_ports.copy() else: content = {} content['status'] = 'ok' msg = self.session.send(stream, 'connect_reply', content, parent, ident) self.log.debug("%s", msg) @property def kernel_info(self): return { 'protocol_version': kernel_protocol_version, 'implementation': self.implementation, 'implementation_version': self.implementation_version, 'language_info': self.language_info, 'banner': self.banner, 'help_links': self.help_links, } async def kernel_info_request(self, stream, ident, parent): content = {'status': 'ok'} content.update(self.kernel_info) msg = self.session.send(stream, 'kernel_info_reply', content, parent, ident) self.log.debug("%s", msg) async def comm_info_request(self, stream, ident, parent): content = parent['content'] target_name = content.get('target_name', None) # Should this be moved to ipkernel? 
if hasattr(self, 'comm_manager'): comms = { k: dict(target_name=v.target_name) for (k, v) in self.comm_manager.comms.items() if v.target_name == target_name or target_name is None } else: comms = {} reply_content = dict(comms=comms, status='ok') msg = self.session.send(stream, 'comm_info_reply', reply_content, parent, ident) self.log.debug("%s", msg) async def interrupt_request(self, stream, ident, parent): pid = os.getpid() pgid = os.getpgid(pid) if os.name == "nt": self.log.error("Interrupt message not supported on Windows") else: # Prefer process-group over process if pgid and hasattr(os, "killpg"): try: os.killpg(pgid, SIGINT) return except OSError: pass try: os.kill(pid, SIGINT) except OSError: pass content = parent['content'] self.session.send(stream, 'interrupt_reply', content, parent, ident=ident) return async def shutdown_request(self, stream, ident, parent): content = self.do_shutdown(parent['content']['restart']) if inspect.isawaitable(content): content = await content self.session.send(stream, 'shutdown_reply', content, parent, ident=ident) # same content, but different msg_id for broadcasting on IOPub self._shutdown_message = self.session.msg('shutdown_reply', content, parent ) self._at_shutdown() self.log.debug('Stopping control ioloop') control_io_loop = self.control_stream.io_loop control_io_loop.add_callback(control_io_loop.stop) self.log.debug('Stopping shell ioloop') shell_io_loop = self.shell_stream.io_loop shell_io_loop.add_callback(shell_io_loop.stop) def do_shutdown(self, restart): """Override in subclasses to do things when the frontend shuts down the kernel. """ return {'status': 'ok', 'restart': restart} async def is_complete_request(self, stream, ident, parent): content = parent['content'] code = content['code'] reply_content = self.do_is_complete(code) if inspect.isawaitable(reply_content): reply_content = await reply_content reply_content = json_clean(reply_content) reply_msg = self.session.send(stream, 'is_complete_reply', reply_content, parent, ident) self.log.debug("%s", reply_msg) def do_is_complete(self, code): """Override in subclasses to find completions. """ return { 'status' : 'unknown'} async def debug_request(self, stream, ident, parent): content = parent['content'] reply_content = self.do_debug_request(content) if inspect.isawaitable(reply_content): reply_content = await reply_content reply_content = json_clean(reply_content) reply_msg = self.session.send(stream, 'debug_reply', reply_content, parent, ident) self.log.debug("%s", reply_msg) # Taken from https://github.com/jupyter-server/jupyter-resource-usage/blob/e6ec53fa69fdb6de8e878974bcff006310658408/jupyter_resource_usage/metrics.py#L16 def get_process_metric_value(self, process, name, attribute=None): try: # psutil.Process methods will either return... metric_value = getattr(process, name)() if attribute is not None: # ... a named tuple return getattr(metric_value, attribute) else: # ... 
or a number return metric_value # Avoid littering logs with stack traces # complaining about dead processes except BaseException: return None async def usage_request(self, stream, ident, parent): reply_content = {} if psutil is None: return reply_content current_process = psutil.Process() all_processes = [current_process] + current_process.children(recursive=True) process_metric_value = self.get_process_metric_value reply_content['kernel_cpu'] = sum([process_metric_value(process, 'cpu_percent', None) for process in all_processes]) reply_content['kernel_memory'] = sum([process_metric_value(process, 'memory_info', 'rss') for process in all_processes]) cpu_percent = psutil.cpu_percent() # https://psutil.readthedocs.io/en/latest/index.html?highlight=cpu#psutil.cpu_percent # The first time cpu_percent is called it will return a meaningless 0.0 value which you are supposed to ignore. if cpu_percent != None and cpu_percent != 0.0: reply_content['host_cpu_percent'] = cpu_percent reply_content['host_virtual_memory'] = dict(psutil.virtual_memory()._asdict()) reply_msg = self.session.send(stream, 'usage_reply', reply_content, parent, ident) self.log.debug("%s", reply_msg) async def do_debug_request(self, msg): raise NotImplementedError #--------------------------------------------------------------------------- # Engine methods (DEPRECATED) #--------------------------------------------------------------------------- async def apply_request(self, stream, ident, parent): self.log.warning("apply_request is deprecated in kernel_base, moving to ipyparallel.") try: content = parent['content'] bufs = parent['buffers'] msg_id = parent['header']['msg_id'] except Exception: self.log.error("Got bad msg: %s", parent, exc_info=True) return md = self.init_metadata(parent) reply_content, result_buf = self.do_apply(content, bufs, msg_id, md) # flush i/o sys.stdout.flush() sys.stderr.flush() md = self.finish_metadata(parent, md, reply_content) self.session.send(stream, 'apply_reply', reply_content, parent=parent, ident=ident,buffers=result_buf, metadata=md) def do_apply(self, content, bufs, msg_id, reply_metadata): """DEPRECATED""" raise NotImplementedError #--------------------------------------------------------------------------- # Control messages (DEPRECATED) #--------------------------------------------------------------------------- async def abort_request(self, stream, ident, parent): """abort a specific msg by id""" self.log.warning("abort_request is deprecated in kernel_base. It is only part of IPython parallel") msg_ids = parent['content'].get('msg_ids', None) if isinstance(msg_ids, str): msg_ids = [msg_ids] if not msg_ids: self._abort_queues() for mid in msg_ids: self.aborted.add(str(mid)) content = dict(status='ok') reply_msg = self.session.send(stream, 'abort_reply', content=content, parent=parent, ident=ident) self.log.debug("%s", reply_msg) async def clear_request(self, stream, idents, parent): """Clear our namespace.""" self.log.warning("clear_request is deprecated in kernel_base. 
It is only part of IPython parallel") content = self.do_clear() self.session.send(stream, 'clear_reply', ident=idents, parent=parent, content = content) def do_clear(self): """DEPRECATED since 4.0.3""" raise NotImplementedError #--------------------------------------------------------------------------- # Protected interface #--------------------------------------------------------------------------- def _topic(self, topic): """prefixed topic for IOPub messages""" base = "kernel.%s" % self.ident return ("%s.%s" % (base, topic)).encode() _aborting = Bool(False) async def _abort_queues(self): self.shell_stream.flush() self._aborting = True def stop_aborting(): self.log.info("Finishing abort") self._aborting = False asyncio.get_event_loop().call_later(self.stop_on_error_timeout, stop_aborting) def _send_abort_reply(self, stream, msg, idents): """Send a reply to an aborted request""" self.log.info( f"Aborting {msg['header']['msg_id']}: {msg['header']['msg_type']}" ) reply_type = msg["header"]["msg_type"].rsplit("_", 1)[0] + "_reply" status = {"status": "aborted"} md = self.init_metadata(msg) md = self.finish_metadata(msg, md, status) md.update(status) self.session.send( stream, reply_type, metadata=md, content=status, parent=msg, ident=idents, ) def _no_raw_input(self): """Raise StdinNotImplementedError if active frontend doesn't support stdin.""" raise StdinNotImplementedError("raw_input was called, but this " "frontend does not support stdin.") def getpass(self, prompt='', stream=None): """Forward getpass to frontends Raises ------ StdinNotImplementedError if active frontend doesn't support stdin. """ if not self._allow_stdin: raise StdinNotImplementedError( "getpass was called, but this frontend does not support input requests." ) if stream is not None: import warnings warnings.warn( "The `stream` parameter of `getpass.getpass` will have no effect when using ipykernel", UserWarning, stacklevel=2, ) return self._input_request( prompt, self._parent_ident["shell"], self.get_parent("shell"), password=True, ) def raw_input(self, prompt=''): """Forward raw_input to frontends Raises ------ StdinNotImplementedError if active frontend doesn't support stdin. """ if not self._allow_stdin: raise StdinNotImplementedError( "raw_input was called, but this frontend does not support input requests." ) return self._input_request( str(prompt), self._parent_ident["shell"], self.get_parent("shell"), password=False, ) def _input_request(self, prompt, ident, parent, password=False): # Flush output before making the request. sys.stderr.flush() sys.stdout.flush() # flush the stdin socket, to purge stale replies while True: try: self.stdin_socket.recv_multipart(zmq.NOBLOCK) except zmq.ZMQError as e: if e.errno == zmq.EAGAIN: break else: raise # Send the input request. content = json_clean(dict(prompt=prompt, password=password)) self.session.send(self.stdin_socket, 'input_request', content, parent, ident=ident) # Await a response. while True: try: # Use polling with select() so KeyboardInterrupts can get # through; doing a blocking recv() means stdin reads are # uninterruptible on Windows. We need a timeout because # zmq.select() is also uninterruptible, but at least this # way reads get noticed immediately and KeyboardInterrupts # get noticed fairly quickly by human response time standards. 
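                # The same polling pattern in isolation (assumption: `sock` is
                # any zmq socket we expect to read from):
                #
                #     rlist, _, xlist = zmq.select([sock], [], [sock], 0.01)
                #     if rlist:
                #         ident, reply = self.session.recv(sock)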
rlist, _, xlist = zmq.select( [self.stdin_socket], [], [self.stdin_socket], 0.01 ) if rlist or xlist: ident, reply = self.session.recv(self.stdin_socket) if (ident, reply) != (None, None): break except KeyboardInterrupt: # re-raise KeyboardInterrupt, to truncate traceback raise KeyboardInterrupt("Interrupted by user") from None except Exception: self.log.warning("Invalid Message:", exc_info=True) try: value = reply["content"]["value"] except Exception: self.log.error("Bad input_reply: %s", parent) value = '' if value == '\x04': # EOF raise EOFError return value def _at_shutdown(self): """Actions taken at shutdown by the kernel, called by python's atexit. """ if self._shutdown_message is not None: self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown')) self.log.debug("%s", self._shutdown_message) self.control_stream.flush(zmq.POLLOUT) ipykernel-6.7.0/ipykernel/kernelspec.py000066400000000000000000000162031417004153500202370ustar00rootroot00000000000000"""The IPython kernel spec for Jupyter""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import errno import json import os import shutil import stat import sys import tempfile from jupyter_client.kernelspec import KernelSpecManager from .ipkernel import _is_debugpy_available pjoin = os.path.join KERNEL_NAME = 'python%i' % sys.version_info[0] # path to kernelspec resources RESOURCES = pjoin(os.path.dirname(__file__), 'resources') def make_ipkernel_cmd(mod="ipykernel_launcher", executable=None, extra_arguments=None): """Build Popen command list for launching an IPython kernel. Parameters ---------- mod : str, optional (default 'ipykernel') A string of an IPython module whose __main__ starts an IPython kernel executable : str, optional (default sys.executable) The Python executable to use for the kernel process. extra_arguments : list, optional A list of extra arguments to pass when executing the launch code. Returns ------- A Popen command list """ if executable is None: executable = sys.executable extra_arguments = extra_arguments or [] arguments = [executable, '-m', mod, '-f', '{connection_file}'] arguments.extend(extra_arguments) return arguments def get_kernel_dict(extra_arguments=None): """Construct dict for kernel.json""" return { 'argv': make_ipkernel_cmd(extra_arguments=extra_arguments), 'display_name': 'Python %i (ipykernel)' % sys.version_info[0], 'language': 'python', 'metadata': { 'debugger': _is_debugpy_available} } def write_kernel_spec(path=None, overrides=None, extra_arguments=None): """Write a kernel spec directory to `path` If `path` is not specified, a temporary directory is created. If `overrides` is given, the kernelspec JSON is updated before writing. The path to the kernelspec is always returned. 
""" if path is None: path = os.path.join(tempfile.mkdtemp(suffix='_kernels'), KERNEL_NAME) # stage resources shutil.copytree(RESOURCES, path) # ensure path is writable mask = os.stat(path).st_mode if not mask & stat.S_IWUSR: os.chmod(path, mask | stat.S_IWUSR) # write kernel.json kernel_dict = get_kernel_dict(extra_arguments) if overrides: kernel_dict.update(overrides) with open(pjoin(path, 'kernel.json'), 'w') as f: json.dump(kernel_dict, f, indent=1) return path def install(kernel_spec_manager=None, user=False, kernel_name=KERNEL_NAME, display_name=None, prefix=None, profile=None, env=None): """Install the IPython kernelspec for Jupyter Parameters ---------- kernel_spec_manager : KernelSpecManager [optional] A KernelSpecManager to use for installation. If none provided, a default instance will be created. user : bool [default: False] Whether to do a user-only install, or system-wide. kernel_name : str, optional Specify a name for the kernelspec. This is needed for having multiple IPython kernels for different environments. display_name : str, optional Specify the display name for the kernelspec profile : str, optional Specify a custom profile to be loaded by the kernel. prefix : str, optional Specify an install prefix for the kernelspec. This is needed to install into a non-default location, such as a conda/virtual-env. env : dict, optional A dictionary of extra environment variables for the kernel. These will be added to the current environment variables before the kernel is started Returns ------- The path where the kernelspec was installed. """ if kernel_spec_manager is None: kernel_spec_manager = KernelSpecManager() if (kernel_name != KERNEL_NAME) and (display_name is None): # kernel_name is specified and display_name is not # default display_name to kernel_name display_name = kernel_name overrides = {} if display_name: overrides["display_name"] = display_name if profile: extra_arguments = ["--profile", profile] if not display_name: # add the profile to the default display name overrides["display_name"] = 'Python %i [profile=%s]' % (sys.version_info[0], profile) else: extra_arguments = None if env: overrides['env'] = env path = write_kernel_spec(overrides=overrides, extra_arguments=extra_arguments) dest = kernel_spec_manager.install_kernel_spec( path, kernel_name=kernel_name, user=user, prefix=prefix) # cleanup afterward shutil.rmtree(path) return dest # Entrypoint from traitlets.config import Application class InstallIPythonKernelSpecApp(Application): """Dummy app wrapping argparse""" name = 'ipython-kernel-install' def initialize(self, argv=None): if argv is None: argv = sys.argv[1:] self.argv = argv def start(self): import argparse parser = argparse.ArgumentParser(prog=self.name, description="Install the IPython kernel spec.") parser.add_argument('--user', action='store_true', help="Install for the current user instead of system-wide") parser.add_argument('--name', type=str, default=KERNEL_NAME, help="Specify a name for the kernelspec." " This is needed to have multiple IPython kernels at the same time.") parser.add_argument('--display-name', type=str, help="Specify the display name for the kernelspec." " This is helpful when you have multiple IPython kernels.") parser.add_argument('--profile', type=str, help="Specify an IPython profile to load. " "This can be used to create custom versions of the kernel.") parser.add_argument('--prefix', type=str, help="Specify an install prefix for the kernelspec." 
" This is needed to install into a non-default location, such as a conda/virtual-env.") parser.add_argument('--sys-prefix', action='store_const', const=sys.prefix, dest='prefix', help="Install to Python's sys.prefix." " Shorthand for --prefix='%s'. For use in conda/virtual-envs." % sys.prefix) parser.add_argument('--env', action='append', nargs=2, metavar=('ENV', 'VALUE'), help="Set environment variables for the kernel.") opts = parser.parse_args(self.argv) if opts.env: opts.env = {k:v for (k, v) in opts.env} try: dest = install(user=opts.user, kernel_name=opts.name, profile=opts.profile, prefix=opts.prefix, display_name=opts.display_name, env=opts.env) except OSError as e: if e.errno == errno.EACCES: print(e, file=sys.stderr) if opts.user: print("Perhaps you want `sudo` or `--user`?", file=sys.stderr) self.exit(1) raise print("Installed kernelspec %s in %s" % (opts.name, dest)) if __name__ == '__main__': InstallIPythonKernelSpecApp.launch_instance() ipykernel-6.7.0/ipykernel/log.py000066400000000000000000000013401417004153500166610ustar00rootroot00000000000000from zmq.log.handlers import PUBHandler import warnings warnings.warn("ipykernel.log is deprecated. It has moved to ipyparallel.engine.log", DeprecationWarning, stacklevel=2 ) class EnginePUBHandler(PUBHandler): """A simple PUBHandler subclass that sets root_topic""" engine=None def __init__(self, engine, *args, **kwargs): PUBHandler.__init__(self,*args, **kwargs) self.engine = engine @property def root_topic(self): """this is a property, in case the handler is created before the engine gets registered with an id""" if isinstance(getattr(self.engine, 'id', None), int): return "engine.%i"%self.engine.id else: return "engine" ipykernel-6.7.0/ipykernel/parentpoller.py000066400000000000000000000077401417004153500206210ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. try: import ctypes except ImportError: ctypes = None import os import platform import signal import time from _thread import interrupt_main # Py 3 from threading import Thread from traitlets.log import get_logger import warnings class ParentPollerUnix(Thread): """ A Unix-specific daemon thread that terminates the program immediately when the parent process no longer exists. """ def __init__(self): super().__init__() self.daemon = True def run(self): # We cannot use os.waitpid because it works only for child processes. from errno import EINTR while True: try: if os.getppid() == 1: get_logger().warning("Parent appears to have exited, shutting down.") os._exit(1) time.sleep(1.0) except OSError as e: if e.errno == EINTR: continue raise class ParentPollerWindows(Thread): """ A Windows-specific daemon thread that listens for a special event that signals an interrupt and, optionally, terminates the program immediately when the parent process no longer exists. """ def __init__(self, interrupt_handle=None, parent_handle=None): """ Create the poller. At least one of the optional parameters must be provided. Parameters ---------- interrupt_handle : HANDLE (int), optional If provided, the program will generate a Ctrl+C event when this handle is signaled. parent_handle : HANDLE (int), optional If provided, the program will terminate immediately when this handle is signaled. 
""" assert(interrupt_handle or parent_handle) super().__init__() if ctypes is None: raise ImportError("ParentPollerWindows requires ctypes") self.daemon = True self.interrupt_handle = interrupt_handle self.parent_handle = parent_handle def run(self): """ Run the poll loop. This method never returns. """ try: from _winapi import WAIT_OBJECT_0, INFINITE except ImportError: from _subprocess import WAIT_OBJECT_0, INFINITE # Build the list of handle to listen on. handles = [] if self.interrupt_handle: handles.append(self.interrupt_handle) if self.parent_handle: handles.append(self.parent_handle) arch = platform.architecture()[0] c_int = ctypes.c_int64 if arch.startswith('64') else ctypes.c_int # Listen forever. while True: result = ctypes.windll.kernel32.WaitForMultipleObjects( len(handles), # nCount (c_int * len(handles))(*handles), # lpHandles False, # bWaitAll INFINITE) # dwMilliseconds if WAIT_OBJECT_0 <= result < len(handles): handle = handles[result - WAIT_OBJECT_0] if handle == self.interrupt_handle: # check if signal handler is callable # to avoid 'int not callable' error (Python issue #23395) if callable(signal.getsignal(signal.SIGINT)): interrupt_main() elif handle == self.parent_handle: get_logger().warning("Parent appears to have exited, shutting down.") os._exit(1) elif result < 0: # wait failed, just give up and stop polling. warnings.warn("""Parent poll failed. If the frontend dies, the kernel may be left running. Please let us know about your system (bitness, Python, etc.) at ipython-dev@scipy.org""") return ipykernel-6.7.0/ipykernel/pickleutil.py000066400000000000000000000301641417004153500202530ustar00rootroot00000000000000"""Pickle related utilities. Perhaps this should be called 'can'.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import typing import warnings warnings.warn("ipykernel.pickleutil is deprecated. It has moved to ipyparallel.", DeprecationWarning, stacklevel=2 ) import copy import sys import pickle from types import FunctionType from traitlets.utils.importstring import import_item # This registers a hook when it's imported from ipyparallel.serialize import codeutil # noqa F401 from traitlets.log import get_logger buffer = memoryview class_type = type PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL def _get_cell_type(a=None): """the type of a closure cell doesn't seem to be importable, so just create one """ def inner(): return a return type(inner.__closure__[0]) cell_type = _get_cell_type() #------------------------------------------------------------------------------- # Functions #------------------------------------------------------------------------------- def interactive(f): """decorator for making functions appear as interactively defined. This results in the function being linked to the user_ns as globals() instead of the module globals(). """ # build new FunctionType, so it can have the right globals # interactive functions never have closures, that's kind of the point if isinstance(f, FunctionType): mainmod = __import__('__main__') f = FunctionType(f.__code__, mainmod.__dict__, f.__name__, f.__defaults__, ) # associate with __main__ for uncanning f.__module__ = '__main__' return f def use_dill(): """use dill to expand serialization support adds support for object methods and closures to serialization. 
""" # import dill causes most of the magic import dill # dill doesn't work with cPickle, # tell the two relevant modules to use plain pickle global pickle pickle = dill try: from ipykernel import serialize except ImportError: pass else: serialize.pickle = dill # disable special function handling, let dill take care of it can_map.pop(FunctionType, None) def use_cloudpickle(): """use cloudpickle to expand serialization support adds support for object methods and closures to serialization. """ import cloudpickle global pickle pickle = cloudpickle try: from ipykernel import serialize except ImportError: pass else: serialize.pickle = cloudpickle # disable special function handling, let cloudpickle take care of it can_map.pop(FunctionType, None) #------------------------------------------------------------------------------- # Classes #------------------------------------------------------------------------------- class CannedObject: def __init__(self, obj, keys=[], hook=None): """can an object for safe pickling Parameters ---------- obj The object to be canned keys : list (optional) list of attribute names that will be explicitly canned / uncanned hook : callable (optional) An optional extra callable, which can do additional processing of the uncanned object. Notes ----- large data may be offloaded into the buffers list, used for zero-copy transfers. """ self.keys = keys self.obj = copy.copy(obj) self.hook = can(hook) for key in keys: setattr(self.obj, key, can(getattr(obj, key))) self.buffers = [] def get_object(self, g=None): if g is None: g = {} obj = self.obj for key in self.keys: setattr(obj, key, uncan(getattr(obj, key), g)) if self.hook: self.hook = uncan(self.hook, g) self.hook(obj, g) return self.obj class Reference(CannedObject): """object for wrapping a remote reference by name.""" def __init__(self, name): if not isinstance(name, str): raise TypeError("illegal name: %r"%name) self.name = name self.buffers = [] def __repr__(self): return ""%self.name def get_object(self, g=None): if g is None: g = {} return eval(self.name, g) class CannedCell(CannedObject): """Can a closure cell""" def __init__(self, cell): self.cell_contents = can(cell.cell_contents) def get_object(self, g=None): cell_contents = uncan(self.cell_contents, g) def inner(): return cell_contents return inner.__closure__[0] class CannedFunction(CannedObject): def __init__(self, f): self._check_type(f) self.code = f.__code__ if f.__defaults__: self.defaults = [ can(fd) for fd in f.__defaults__ ] else: self.defaults = None closure = f.__closure__ if closure: self.closure = tuple( can(cell) for cell in closure ) else: self.closure = None self.module = f.__module__ or '__main__' self.__name__ = f.__name__ self.buffers = [] def _check_type(self, obj): assert isinstance(obj, FunctionType), "Not a function type" def get_object(self, g=None): # try to load function back into its module: if not self.module.startswith('__'): __import__(self.module) g = sys.modules[self.module].__dict__ if g is None: g = {} if self.defaults: defaults = tuple(uncan(cfd, g) for cfd in self.defaults) else: defaults = None if self.closure: closure = tuple(uncan(cell, g) for cell in self.closure) else: closure = None newFunc = FunctionType(self.code, g, self.__name__, defaults, closure) return newFunc class CannedClass(CannedObject): def __init__(self, cls): self._check_type(cls) self.name = cls.__name__ self.old_style = not isinstance(cls, type) self._canned_dict = {} for k,v in cls.__dict__.items(): if k not in ('__weakref__', '__dict__'): 
self._canned_dict[k] = can(v) if self.old_style: mro = [] else: mro = cls.mro() self.parents = [ can(c) for c in mro[1:] ] self.buffers = [] def _check_type(self, obj): assert isinstance(obj, class_type), "Not a class type" def get_object(self, g=None): parents = tuple(uncan(p, g) for p in self.parents) return type(self.name, parents, uncan_dict(self._canned_dict, g=g)) class CannedArray(CannedObject): def __init__(self, obj): from numpy import ascontiguousarray self.shape = obj.shape self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str self.pickled = False if sum(obj.shape) == 0: self.pickled = True elif obj.dtype == 'O': # can't handle object dtype with buffer approach self.pickled = True elif obj.dtype.fields and any(dt == 'O' for dt,sz in obj.dtype.fields.values()): self.pickled = True if self.pickled: # just pickle it self.buffers = [pickle.dumps(obj, PICKLE_PROTOCOL)] else: # ensure contiguous obj = ascontiguousarray(obj, dtype=None) self.buffers = [buffer(obj)] def get_object(self, g=None): from numpy import frombuffer data = self.buffers[0] if self.pickled: # we just pickled it return pickle.loads(data) else: return frombuffer(data, dtype=self.dtype).reshape(self.shape) class CannedBytes(CannedObject): @staticmethod def wrap(buf: typing.Union[memoryview, bytes, typing.SupportsBytes]) -> bytes: """Cast a buffer or memoryview object to bytes""" if isinstance(buf, memoryview): return buf.tobytes() if not isinstance(buf, bytes): return bytes(buf) return buf def __init__(self, obj): self.buffers = [obj] def get_object(self, g=None): data = self.buffers[0] return self.wrap(data) class CannedBuffer(CannedBytes): wrap = buffer class CannedMemoryView(CannedBytes): wrap = memoryview #------------------------------------------------------------------------------- # Functions #------------------------------------------------------------------------------- def _import_mapping(mapping, original=None): """import any string-keys in a type mapping """ log = get_logger() log.debug("Importing canning map") for key,value in list(mapping.items()): if isinstance(key, str): try: cls = import_item(key) except Exception: if original and key not in original: # only message on user-added classes log.error("canning class not importable: %r", key, exc_info=True) mapping.pop(key) else: mapping[cls] = mapping.pop(key) def istype(obj, check): """like isinstance(obj, check), but strict This won't catch subclasses. 
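    Examples
    --------
    ::

        istype([1, 2], list)           # True
        istype([1, 2], (list, tuple))  # True
        istype(True, int)              # False: bool is only a *subclass*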
""" if isinstance(check, tuple): for cls in check: if type(obj) is cls: return True return False else: return type(obj) is check def can(obj): """prepare an object for pickling""" import_needed = False for cls,canner in can_map.items(): if isinstance(cls, str): import_needed = True break elif istype(obj, cls): return canner(obj) if import_needed: # perform can_map imports, then try again # this will usually only happen once _import_mapping(can_map, _original_can_map) return can(obj) return obj def can_class(obj): if isinstance(obj, class_type) and obj.__module__ == '__main__': return CannedClass(obj) else: return obj def can_dict(obj): """can the *values* of a dict""" if istype(obj, dict): newobj = {} for k, v in obj.items(): newobj[k] = can(v) return newobj else: return obj sequence_types = (list, tuple, set) def can_sequence(obj): """can the elements of a sequence""" if istype(obj, sequence_types): t = type(obj) return t([can(i) for i in obj]) else: return obj def uncan(obj, g=None): """invert canning""" import_needed = False for cls,uncanner in uncan_map.items(): if isinstance(cls, str): import_needed = True break elif isinstance(obj, cls): return uncanner(obj, g) if import_needed: # perform uncan_map imports, then try again # this will usually only happen once _import_mapping(uncan_map, _original_uncan_map) return uncan(obj, g) return obj def uncan_dict(obj, g=None): if istype(obj, dict): newobj = {} for k, v in obj.items(): newobj[k] = uncan(v,g) return newobj else: return obj def uncan_sequence(obj, g=None): if istype(obj, sequence_types): t = type(obj) return t([uncan(i,g) for i in obj]) else: return obj #------------------------------------------------------------------------------- # API dictionaries #------------------------------------------------------------------------------- # These dicts can be extended for custom serialization of new objects can_map = { 'numpy.ndarray' : CannedArray, FunctionType : CannedFunction, bytes : CannedBytes, memoryview : CannedMemoryView, cell_type : CannedCell, class_type : can_class, } if buffer is not memoryview: can_map[buffer] = CannedBuffer uncan_map = { CannedObject : lambda obj, g: obj.get_object(g), dict : uncan_dict, } # for use in _import_mapping: _original_can_map = can_map.copy() _original_uncan_map = uncan_map.copy() ipykernel-6.7.0/ipykernel/pylab/000077500000000000000000000000001417004153500166375ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/pylab/__init__.py000066400000000000000000000000001417004153500207360ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/pylab/backend_inline.py000066400000000000000000000006421417004153500221400ustar00rootroot00000000000000"""A matplotlib backend for publishing figures via display_data""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import warnings from matplotlib_inline.backend_inline import * # analysis: ignore # noqa F401 warnings.warn( "`ipykernel.pylab.backend_inline` is deprecated, directly " "use `matplotlib_inline.backend_inline`", DeprecationWarning ) ipykernel-6.7.0/ipykernel/pylab/config.py000066400000000000000000000005271417004153500204620ustar00rootroot00000000000000"""Configurable for configuring the IPython inline backend This module does not import anything from matplotlib. 
""" import warnings from matplotlib_inline.config import * # analysis: ignore # noqa F401 warnings.warn( "`ipykernel.pylab.config` is deprecated, directly " "use `matplotlib_inline.config`", DeprecationWarning ) ipykernel-6.7.0/ipykernel/resources/000077500000000000000000000000001417004153500175425ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/resources/logo-32x32.png000066400000000000000000000020741417004153500217720ustar00rootroot00000000000000PNG  IHDR szzbKGD pHYs}}tIME "4kIDATXW]h\E=nmXBZ "?OUSQ"VI[EP-PP ZkXJcQ1anߦW7s9372ʈ{ d "#&ғ*;=TvuP1"AH^2:rdxKE)>.c)=L%BwH AYsp͟I{Pij,:#BǙ4dSvFW2(Yt2YGT$xҟobcXr|'cGЕ:@)<`QOw\Ḩ3Z1@:`)wZ*b 3ߍz:%2 îsmOFO5Ny]kG]{4y ^lSpn<@?.n@[4I}by1o Tb>IENDB`ipykernel-6.7.0/ipykernel/resources/logo-64x64.png000066400000000000000000000042041417004153500220010ustar00rootroot00000000000000PNG  IHDR@@iqbKGD pHYsEtIME ")IDATx[}lUg=" m:vU6u6D.u: n3deiAVV(t.3#Qh7=?s9v;ƾm޷=:o15t\6>$HR$9dIN'9Hb@ (Q6ɣ)S/U )B8s-\5{mX]ֽaUi7$G^ y C}=4?D?Y<- Yώx @\ J^P"[Hp 0 ֦A񫥩_L/'ڈOFF>v`MoT$No kuN=v`A]]{wiB #DŇcIR uQH.p4ZX cI!*5y7<`ڷB_@$ ͋J?֌EpF]`xtf;C/ ØhXKL-@w< P[`V{P#83a`5p'ڠ )L&eFӴh߳PO@ :O5[[Xw`T߱G$i˾%T٨ !:'%Wr  }2 %'BM[:[TdĒ|!!ge0ϵU@Vn\kA Nu`#pֿ$;@M,p z{;FR23 6Q| !5OhH!J|/AB|E[K33iߞp%H@/G3՗+kxFH-\(YQ ^ +#8<FWP5VH- !("YK;~3 pFB+lrpb\\5vc?WH.=(`;H_cFaG%Q<[)3z$=E(oǡ0xTv%uDW'TjA1;Duh@Ïo3R)@,#I-˞`6Fj5>Pp3uY5H yE \`)o޼gۖbM6(EBESD!#t>'Ksrh޼C1;ϑEqta Dvhx׏o-[W\g-jא=   D*pxۗ9Z8g#9:SPeD 81>PoN#HnZ뵦;,J+^G(d-"sj:@J I}Uq6 qF}fk1c2yޭ%nFY;"5D*v7kna&ÁilDP#3pmWG$:6|&@aÀ4L4oJ-"]WIME4Z A RpolhR?Kt`>6Zbx!(E)1u ?tۓZ|ߎ[ Tαs̷ÜZ@{2ΞIiXdcC@v˲ro\hXf|O޹o(+h (C /&9`N}DIU1< 4u5D*q Dii:Q4-',#Yx[yX9NY/_2vrPeI}qG{ . 'HwEzRJzԧu1;|>,Jxґw h12l9wF!; 2+s~67N4܋Q}?K58 { -yf9;buz^ɖn)AI12U>p /c|kP0^,h` 2Q" "AyR`jL15&ynIENDB`ipykernel-6.7.0/ipykernel/serialize.py000066400000000000000000000140331417004153500200720ustar00rootroot00000000000000"""serialization utilities for apply messages""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import warnings warnings.warn("ipykernel.serialize is deprecated. 
It has moved to ipyparallel.serialize", DeprecationWarning, stacklevel=2 ) import pickle from itertools import chain try: # available since ipyparallel 5.0.0 from ipyparallel.serialize.canning import ( can, uncan, can_sequence, uncan_sequence, CannedObject, istype, sequence_types, ) from ipyparallel.serialize.serialize import PICKLE_PROTOCOL except ImportError: # Deprecated since ipykernel 4.3.0 from ipykernel.pickleutil import ( can, uncan, can_sequence, uncan_sequence, CannedObject, istype, sequence_types, PICKLE_PROTOCOL, ) from jupyter_client.session import MAX_ITEMS, MAX_BYTES #----------------------------------------------------------------------------- # Serialization Functions #----------------------------------------------------------------------------- def _extract_buffers(obj, threshold=MAX_BYTES): """extract buffers larger than a certain threshold""" buffers = [] if isinstance(obj, CannedObject) and obj.buffers: for i,buf in enumerate(obj.buffers): if len(buf) > threshold: # buffer larger than threshold, prevent pickling obj.buffers[i] = None buffers.append(buf) # buffer too small for separate send, coerce to bytes # because pickling buffer objects just results in broken pointers elif isinstance(buf, memoryview): obj.buffers[i] = buf.tobytes() return buffers def _restore_buffers(obj, buffers): """restore buffers extracted by """ if isinstance(obj, CannedObject) and obj.buffers: for i,buf in enumerate(obj.buffers): if buf is None: obj.buffers[i] = buffers.pop(0) def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS): """Serialize an object into a list of sendable buffers. Parameters ---------- obj : object The object to be serialized buffer_threshold : int The threshold (in bytes) for pulling out data buffers to avoid pickling them. item_threshold : int The maximum number of items over which canning will iterate. Containers (lists, dicts) larger than this will be pickled without introspection. Returns ------- [bufs] : list of buffers representing the serialized object. """ buffers = [] if istype(obj, sequence_types) and len(obj) < item_threshold: cobj = can_sequence(obj) for c in cobj: buffers.extend(_extract_buffers(c, buffer_threshold)) elif istype(obj, dict) and len(obj) < item_threshold: cobj = {} for k in sorted(obj): c = can(obj[k]) buffers.extend(_extract_buffers(c, buffer_threshold)) cobj[k] = c else: cobj = can(obj) buffers.extend(_extract_buffers(cobj, buffer_threshold)) buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL)) return buffers def deserialize_object(buffers, g=None): """reconstruct an object serialized by serialize_object from data buffers. Parameters ---------- buffers : list of buffers/bytes g : globals to be used when uncanning Returns ------- (newobj, bufs) : unpacked object, and the list of remaining unused buffers. 
""" bufs = list(buffers) pobj = bufs.pop(0) canned = pickle.loads(pobj) if istype(canned, sequence_types) and len(canned) < MAX_ITEMS: for c in canned: _restore_buffers(c, bufs) newobj = uncan_sequence(canned, g) elif istype(canned, dict) and len(canned) < MAX_ITEMS: newobj = {} for k in sorted(canned): c = canned[k] _restore_buffers(c, bufs) newobj[k] = uncan(c, g) else: _restore_buffers(canned, bufs) newobj = uncan(canned, g) return newobj, bufs def pack_apply_message(f, args, kwargs, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS): """pack up a function, args, and kwargs to be sent over the wire Each element of args/kwargs will be canned for special treatment, but inspection will not go any deeper than that. Any object whose data is larger than `threshold` will not have their data copied (only numpy arrays and bytes/buffers support zero-copy) Message will be a list of bytes/buffers of the format: [ cf, pinfo, , ] With length at least two + len(args) + len(kwargs) """ arg_bufs = list(chain.from_iterable( serialize_object(arg, buffer_threshold, item_threshold) for arg in args)) kw_keys = sorted(kwargs.keys()) kwarg_bufs = list(chain.from_iterable( serialize_object(kwargs[key], buffer_threshold, item_threshold) for key in kw_keys)) info = dict(nargs=len(args), narg_bufs=len(arg_bufs), kw_keys=kw_keys) msg = [pickle.dumps(can(f), PICKLE_PROTOCOL)] msg.append(pickle.dumps(info, PICKLE_PROTOCOL)) msg.extend(arg_bufs) msg.extend(kwarg_bufs) return msg def unpack_apply_message(bufs, g=None, copy=True): """unpack f,args,kwargs from buffers packed by pack_apply_message() Returns: original f,args,kwargs""" bufs = list(bufs) # allow us to pop assert len(bufs) >= 2, "not enough buffers!" pf = bufs.pop(0) f = uncan(pickle.loads(pf), g) pinfo = bufs.pop(0) info = pickle.loads(pinfo) arg_bufs, kwarg_bufs = bufs[:info['narg_bufs']], bufs[info['narg_bufs']:] args = [] for i in range(info['nargs']): arg, arg_bufs = deserialize_object(arg_bufs, g) args.append(arg) args = tuple(args) assert not arg_bufs, "Shouldn't be any arg bufs left over" kwargs = {} for key in info['kw_keys']: kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g) kwargs[key] = kwarg assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over" return f,args,kwargs ipykernel-6.7.0/ipykernel/tests/000077500000000000000000000000001417004153500166725ustar00rootroot00000000000000ipykernel-6.7.0/ipykernel/tests/__init__.py000066400000000000000000000016421417004153500210060ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
import os import shutil import sys import tempfile from unittest.mock import patch from jupyter_core import paths as jpaths from IPython import paths as ipaths from ipykernel.kernelspec import install pjoin = os.path.join tmp = None patchers = [] def setup(): """setup temporary env for tests""" global tmp tmp = tempfile.mkdtemp() patchers[:] = [ patch.dict(os.environ, { 'HOME': tmp, # Let tests work with --user install when HOME is changed: 'PYTHONPATH': os.pathsep.join(sys.path), }), ] for p in patchers: p.start() # install IPython in the temp home: install(user=True) def teardown(): for p in patchers: p.stop() try: shutil.rmtree(tmp) except OSError: # no such file pass ipykernel-6.7.0/ipykernel/tests/_asyncio_utils.py000066400000000000000000000005531417004153500222730ustar00rootroot00000000000000"""test utilities that use async/await syntax a separate file to avoid syntax errors on Python 2 """ import asyncio def async_func(): """Simple async function to schedule a task on the current eventloop""" loop = asyncio.get_event_loop() assert loop.is_running() async def task(): await asyncio.sleep(1) loop.create_task(task()) ipykernel-6.7.0/ipykernel/tests/test_async.py000066400000000000000000000035441417004153500214260ustar00rootroot00000000000000"""Test async/await integration""" from distutils.version import LooseVersion as V import sys import pytest import IPython from .utils import execute, flush_channels, start_new_kernel, TIMEOUT from .test_message_spec import validate_message KC = KM = None def setup_function(): """start the global kernel (if it isn't running) and return its client""" global KM, KC KM, KC = start_new_kernel() flush_channels(KC) def teardown_function(): KC.stop_channels() KM.shutdown_kernel(now=True) def test_async_await(): flush_channels(KC) msg_id, content = execute("import asyncio; await asyncio.sleep(0.1)", KC) assert content["status"] == "ok", content @pytest.mark.parametrize("asynclib", ["asyncio", "trio", "curio"]) def test_async_interrupt(asynclib, request): try: __import__(asynclib) except ImportError: pytest.skip("Requires %s" % asynclib) request.addfinalizer(lambda: execute("%autoawait asyncio", KC)) flush_channels(KC) msg_id, content = execute("%autoawait " + asynclib, KC) assert content["status"] == "ok", content flush_channels(KC) msg_id = KC.execute( f"print('begin'); import {asynclib}; await {asynclib}.sleep(5)" ) busy = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(busy, "status", msg_id) assert busy["content"]["execution_state"] == "busy" echo = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(echo, "execute_input") stream = KC.get_iopub_msg(timeout=TIMEOUT) # wait for the stream output to be sure kernel is in the async block validate_message(stream, "stream") assert stream["content"]["text"] == "begin\n" KM.interrupt_kernel() reply = KC.get_shell_msg()["content"] assert reply["status"] == "error", reply assert reply["ename"] in {"CancelledError", "KeyboardInterrupt"} flush_channels(KC) ipykernel-6.7.0/ipykernel/tests/test_connect.py000066400000000000000000000101451417004153500217350ustar00rootroot00000000000000"""Tests for kernel connection utilities""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
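# Illustrative sketch (not one of the original tests): the write/read
# round trip that the tests below exercise, using only helpers they
# already rely on. The function name is hypothetical.
def _example_connection_roundtrip(tmpdir):
    import os
    from ipykernel import connect
    cf = os.path.join(str(tmpdir), 'kernel.json')
    connect.write_connection_file(cf, ip='127.0.0.1', key=b'abc123')
    info = connect.get_connection_info(cf, unpack=True)
    assert info['ip'] == '127.0.0.1'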
import errno import json import os from tempfile import TemporaryDirectory from unittest.mock import patch import pytest import zmq from traitlets.config import Config from ipykernel import connect from ipykernel.kernelapp import IPKernelApp from .utils import TemporaryWorkingDirectory sample_info = { 'ip': '1.2.3.4', 'transport': 'ipc', 'shell_port': 1, 'hb_port': 2, 'iopub_port': 3, 'stdin_port': 4, 'control_port': 5, 'key': b'abc123', 'signature_scheme': 'hmac-md5', } class DummyKernelApp(IPKernelApp): def _default_shell_port(self): return 0 def initialize(self, argv=[]): self.init_profile_dir() self.init_connection_file() def test_get_connection_file(): cfg = Config() with TemporaryWorkingDirectory() as d: cfg.ProfileDir.location = d cf = 'kernel.json' app = DummyKernelApp(config=cfg, connection_file=cf) app.initialize() profile_cf = os.path.join(app.connection_dir, cf) assert profile_cf == app.abs_connection_file with open(profile_cf, 'w') as f: f.write("{}") assert os.path.exists(profile_cf) assert connect.get_connection_file(app) == profile_cf app.connection_file = cf assert connect.get_connection_file(app) == profile_cf def test_get_connection_info(): with TemporaryDirectory() as d: cf = os.path.join(d, 'kernel.json') connect.write_connection_file(cf, **sample_info) json_info = connect.get_connection_info(cf) info = connect.get_connection_info(cf, unpack=True) assert isinstance(json_info, str) sub_info = {k: v for k, v in info.items() if k in sample_info} assert sub_info == sample_info info2 = json.loads(json_info) info2['key'] = info2['key'].encode("utf-8") sub_info2 = {k: v for k, v in info.items() if k in sample_info} assert sub_info2 == sample_info def test_port_bind_failure_raises(request): cfg = Config() with TemporaryWorkingDirectory() as d: cfg.ProfileDir.location = d cf = 'kernel.json' app = DummyKernelApp(config=cfg, connection_file=cf) request.addfinalizer(app.close) app.initialize() with patch.object(app, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = zmq.ZMQError(-100, "fails for unknown error types") with pytest.raises(zmq.ZMQError): app.init_sockets() assert mock_try_bind.call_count == 1 def test_port_bind_failure_recovery(request): try: errno.WSAEADDRINUSE except AttributeError: # Fake windows address in-use code p = patch.object(errno, 'WSAEADDRINUSE', 12345, create=True) p.start() request.addfinalizer(p.stop) cfg = Config() with TemporaryWorkingDirectory() as d: cfg.ProfileDir.location = d cf = 'kernel.json' app = DummyKernelApp(config=cfg, connection_file=cf) request.addfinalizer(app.close) app.initialize() with patch.object(app, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = [ zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind unix"), zmq.ZMQError(errno.WSAEADDRINUSE, "fails for non-bind windows") ] + [0] * 100 # Shouldn't raise anything as retries will kick in app.init_sockets() def test_port_bind_failure_gives_up_retries(request): cfg = Config() with TemporaryWorkingDirectory() as d: cfg.ProfileDir.location = d cf = 'kernel.json' app = DummyKernelApp(config=cfg, connection_file=cf) request.addfinalizer(app.close) app.initialize() with patch.object(app, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind") with pytest.raises(zmq.ZMQError): app.init_sockets() assert mock_try_bind.call_count == 100 ipykernel-6.7.0/ipykernel/tests/test_debugger.py000066400000000000000000000165301417004153500220740ustar00rootroot00000000000000import sys import pytest from .utils 
import TIMEOUT, new_kernel, get_reply seq = 0 # Skip if debugpy is not available pytest.importorskip("debugpy") def wait_for_debug_request(kernel, command, arguments=None, full_reply=False): """Carry out a debug request and return the reply content. It does not check if the request was successful. """ global seq seq += 1 msg = kernel.session.msg( "debug_request", { "type": "request", "seq": seq, "command": command, "arguments": arguments or {}, }, ) kernel.control_channel.send(msg) reply = get_reply(kernel, msg["header"]["msg_id"], channel="control") return reply if full_reply else reply["content"] @pytest.fixture def kernel(): with new_kernel() as kc: yield kc @pytest.fixture def kernel_with_debug(kernel): # Initialize wait_for_debug_request( kernel, "initialize", { "clientID": "test-client", "clientName": "testClient", "adapterID": "", "pathFormat": "path", "linesStartAt1": True, "columnsStartAt1": True, "supportsVariableType": True, "supportsVariablePaging": True, "supportsRunInTerminalRequest": True, "locale": "en", }, ) # Attach wait_for_debug_request(kernel, "attach") try: yield kernel finally: # Detach wait_for_debug_request( kernel, "disconnect", {"restart": False, "terminateDebuggee": True} ) def test_debug_initialize(kernel): reply = wait_for_debug_request( kernel, "initialize", { "clientID": "test-client", "clientName": "testClient", "adapterID": "", "pathFormat": "path", "linesStartAt1": True, "columnsStartAt1": True, "supportsVariableType": True, "supportsVariablePaging": True, "supportsRunInTerminalRequest": True, "locale": "en", }, ) assert reply["success"] def test_attach_debug(kernel_with_debug): reply = wait_for_debug_request( kernel_with_debug, "evaluate", {"expression": "'a' + 'b'", "context": "repl"} ) assert reply["success"] assert reply["body"]["result"] == "" def test_set_breakpoints(kernel_with_debug): code = """def f(a, b): c = a + b return c f(2, 3)""" r = wait_for_debug_request(kernel_with_debug, "dumpCell", {"code": code}) source = r["body"]["sourcePath"] reply = wait_for_debug_request( kernel_with_debug, "setBreakpoints", { "breakpoints": [{"line": 2}], "source": {"path": source}, "sourceModified": False, }, ) assert reply["success"] assert len(reply["body"]["breakpoints"]) == 1 assert reply["body"]["breakpoints"][0]["verified"] assert reply["body"]["breakpoints"][0]["source"]["path"] == source r = wait_for_debug_request(kernel_with_debug, "debugInfo") assert source in map(lambda b: b["source"], r["body"]["breakpoints"]) r = wait_for_debug_request(kernel_with_debug, "configurationDone") assert r["success"] def test_stop_on_breakpoint(kernel_with_debug): code = """def f(a, b): c = a + b return c f(2, 3)""" r = wait_for_debug_request(kernel_with_debug, "dumpCell", {"code": code}) source = r["body"]["sourcePath"] wait_for_debug_request(kernel_with_debug, "debugInfo") wait_for_debug_request( kernel_with_debug, "setBreakpoints", { "breakpoints": [{"line": 2}], "source": {"path": source}, "sourceModified": False, }, ) wait_for_debug_request(kernel_with_debug, "configurationDone", full_reply=True) kernel_with_debug.execute(code) # Wait for stop on breakpoint msg = {"msg_type": "", "content": {}} while msg.get('msg_type') != 'debug_event' or msg["content"].get("event") != "stopped": msg = kernel_with_debug.get_iopub_msg(timeout=TIMEOUT) assert msg["content"]["body"]["reason"] == "breakpoint" @pytest.mark.skipif(sys.version_info >= (3, 10), reason="TODO Does not work on Python 3.10") def test_breakpoint_in_cell_with_leading_empty_lines(kernel_with_debug): code = """ 
def f(a, b): c = a + b return c f(2, 3)""" r = wait_for_debug_request(kernel_with_debug, "dumpCell", {"code": code}) source = r["body"]["sourcePath"] wait_for_debug_request(kernel_with_debug, "debugInfo") wait_for_debug_request( kernel_with_debug, "setBreakpoints", { "breakpoints": [{"line": 6}], "source": {"path": source}, "sourceModified": False, }, ) wait_for_debug_request(kernel_with_debug, "configurationDone", full_reply=True) kernel_with_debug.execute(code) # Wait for stop on breakpoint msg = {"msg_type": "", "content": {}} while msg.get('msg_type') != 'debug_event' or msg["content"].get("event") != "stopped": msg = kernel_with_debug.get_iopub_msg(timeout=TIMEOUT) assert msg["content"]["body"]["reason"] == "breakpoint" def test_rich_inspect_not_at_breakpoint(kernel_with_debug): var_name = "text" value = "Hello the world" code = f"""{var_name}='{value}' print({var_name}) """ msg_id = kernel_with_debug.execute(code) get_reply(kernel_with_debug, msg_id) r = wait_for_debug_request(kernel_with_debug, "inspectVariables") assert var_name in list(map(lambda v: v["name"], r["body"]["variables"])) reply = wait_for_debug_request( kernel_with_debug, "richInspectVariables", {"variableName": var_name}, ) assert reply["body"]["data"] == {"text/plain": f"'{value}'"} def test_rich_inspect_at_breakpoint(kernel_with_debug): code = """def f(a, b): c = a + b return c f(2, 3)""" r = wait_for_debug_request(kernel_with_debug, "dumpCell", {"code": code}) source = r["body"]["sourcePath"] wait_for_debug_request( kernel_with_debug, "setBreakpoints", { "breakpoints": [{"line": 2}], "source": {"path": source}, "sourceModified": False, }, ) r = wait_for_debug_request(kernel_with_debug, "debugInfo") r = wait_for_debug_request(kernel_with_debug, "configurationDone") kernel_with_debug.execute(code) stacks = wait_for_debug_request(kernel_with_debug, "stackTrace", {"threadId": 1})[ "body" ]["stackFrames"] scopes = wait_for_debug_request( kernel_with_debug, "scopes", {"frameId": stacks[0]["id"]} )["body"]["scopes"] locals_ = wait_for_debug_request( kernel_with_debug, "variables", { "variablesReference": next(filter(lambda s: s["name"] == "Locals", scopes))[ "variablesReference" ] }, )["body"]["variables"] reply = wait_for_debug_request( kernel_with_debug, "richInspectVariables", {"variableName": locals_[0]["name"], "frameId": stacks[0]["id"]}, ) assert reply["body"]["data"] == {"text/plain": locals_[0]["value"]} def test_convert_to_long_pathname(): if sys.platform == 'win32': from ipykernel.compiler import _convert_to_long_pathname _convert_to_long_pathname(__file__)ipykernel-6.7.0/ipykernel/tests/test_embed_kernel.py000066400000000000000000000116131417004153500227210ustar00rootroot00000000000000"""test IPython.embed_kernel()""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
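# Illustrative sketch (not part of the original file): the embedding
# pattern these tests drive from a subprocess -- kept commented out here
# because embed_kernel() blocks until a frontend tells it to exit.
#
#     from IPython import embed_kernel
#     def go():
#         a = 5
#         embed_kernel()   # a frontend can now connect and inspect `a`
#     go()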
import os import sys import time import json from contextlib import contextmanager from subprocess import Popen, PIPE from flaky import flaky from jupyter_client import BlockingKernelClient from jupyter_core import paths SETUP_TIMEOUT = 60 TIMEOUT = 15 @contextmanager def setup_kernel(cmd): """start an embedded kernel in a subprocess, and wait for it to be ready Returns ------- kernel_manager: connected KernelManager instance """ def connection_file_ready(connection_file): """Check if connection_file is a readable json file.""" if not os.path.exists(connection_file): return False try: with open(connection_file) as f: json.load(f) return True except ValueError: return False kernel = Popen([sys.executable, '-c', cmd], stdout=PIPE, stderr=PIPE, encoding="utf-8") try: connection_file = os.path.join( paths.jupyter_runtime_dir(), 'kernel-%i.json' % kernel.pid, ) # wait for connection file to exist, timeout after 5s tic = time.time() while not connection_file_ready(connection_file) \ and kernel.poll() is None \ and time.time() < tic + SETUP_TIMEOUT: time.sleep(0.1) # Wait 100ms for the writing to finish time.sleep(0.1) if kernel.poll() is not None: o, e = kernel.communicate() raise OSError("Kernel failed to start:\n%s" % e) if not os.path.exists(connection_file): if kernel.poll() is None: kernel.terminate() raise OSError("Connection file %r never arrived" % connection_file) client = BlockingKernelClient(connection_file=connection_file) client.load_connection_file() client.start_channels() client.wait_for_ready() try: yield client finally: client.stop_channels() finally: kernel.terminate() @flaky(max_runs=3) def test_embed_kernel_basic(): """IPython.embed_kernel() is basically functional""" cmd = '\n'.join([ 'from IPython import embed_kernel', 'def go():', ' a=5', ' b="hi there"', ' embed_kernel()', 'go()', '', ]) with setup_kernel(cmd) as client: # oinfo a (int) client.inspect("a") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] client.execute("c=a*2") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['status'] == 'ok' # oinfo c (should be 10) client.inspect("c") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert '10' in text @flaky(max_runs=3) def test_embed_kernel_namespace(): """IPython.embed_kernel() inherits calling namespace""" cmd = '\n'.join([ 'from IPython import embed_kernel', 'def go():', ' a=5', ' b="hi there"', ' embed_kernel()', 'go()', '', ]) with setup_kernel(cmd) as client: # oinfo a (int) client.inspect("a") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert '5' in text # oinfo b (str) client.inspect("b") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert 'hi there' in text # oinfo c (undefined) client.inspect("c") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert not content['found'] @flaky(max_runs=3) def test_embed_kernel_reentrant(): """IPython.embed_kernel() can be called multiple times""" cmd = '\n'.join([ 'from IPython import embed_kernel', 'count = 0', 'def go():', ' global count', ' embed_kernel()', ' count = count + 1', '', 'while True:' ' go()', '', ]) with setup_kernel(cmd) as client: for i in range(5): client.inspect("count") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert 
content['found'] text = content['data']['text/plain'] assert str(i) in text # exit from embed_kernel client.execute("get_ipython().exit_now = True") msg = client.get_shell_msg(timeout=TIMEOUT) time.sleep(0.2) ipykernel-6.7.0/ipykernel/tests/test_eventloop.py000066400000000000000000000017121417004153500223170ustar00rootroot00000000000000"""Test eventloop integration""" import sys import pytest import tornado from .utils import flush_channels, start_new_kernel, execute KC = KM = None def setup(): """start the global kernel (if it isn't running) and return its client""" global KM, KC KM, KC = start_new_kernel() flush_channels(KC) def teardown(): KC.stop_channels() KM.shutdown_kernel(now=True) async_code = """ from ipykernel.tests._asyncio_utils import async_func async_func() """ @pytest.mark.skipif(tornado.version_info < (5,), reason="only relevant on tornado 5") def test_asyncio_interrupt(): flush_channels(KC) msg_id, content = execute('%gui asyncio', KC) assert content['status'] == 'ok', content flush_channels(KC) msg_id, content = execute(async_code, KC) assert content['status'] == 'ok', content KM.interrupt_kernel() flush_channels(KC) msg_id, content = execute(async_code, KC) assert content['status'] == 'ok' ipykernel-6.7.0/ipykernel/tests/test_heartbeat.py000066400000000000000000000035401417004153500222440ustar00rootroot00000000000000"""Tests for heartbeat thread""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import errno from unittest.mock import patch import pytest import zmq from ipykernel.heartbeat import Heartbeat def test_port_bind_failure_raises(): heart = Heartbeat(None) with patch.object(heart, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = zmq.ZMQError(-100, "fails for unknown error types") with pytest.raises(zmq.ZMQError): heart._bind_socket() assert mock_try_bind.call_count == 1 def test_port_bind_success(): heart = Heartbeat(None) with patch.object(heart, '_try_bind_socket') as mock_try_bind: heart._bind_socket() assert mock_try_bind.call_count == 1 def test_port_bind_failure_recovery(): try: errno.WSAEADDRINUSE except AttributeError: # Fake windows address in-use code errno.WSAEADDRINUSE = 12345 try: heart = Heartbeat(None) with patch.object(heart, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = [ zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind unix"), zmq.ZMQError(errno.WSAEADDRINUSE, "fails for non-bind windows") ] + [0] * 100 # Shouldn't raise anything as retries will kick in heart._bind_socket() finally: # Cleanup fake assignment if errno.WSAEADDRINUSE == 12345: del errno.WSAEADDRINUSE def test_port_bind_failure_gives_up_retries(): heart = Heartbeat(None) with patch.object(heart, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind") with pytest.raises(zmq.ZMQError): heart._bind_socket() assert mock_try_bind.call_count == 100 ipykernel-6.7.0/ipykernel/tests/test_io.py000066400000000000000000000025201417004153500207110ustar00rootroot00000000000000"""Test IO capturing functionality""" import io import zmq import pytest from jupyter_client.session import Session from ipykernel.iostream import IOPubThread, OutStream def test_io_api(): """Test that wrapped stdout has the same API as a normal TextIO object""" session = Session() ctx = zmq.Context() pub = ctx.socket(zmq.PUB) thread = IOPubThread(pub) thread.start() stream = OutStream(session, thread, 'stdout') # cleanup unused zmq objects before we start testing 
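    # (Added note: the assertions below only exercise the OutStream wrapper
    # API itself, so the IOPub thread and zmq context can be torn down first
    # without affecting them.)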
thread.stop() thread.close() ctx.term() assert stream.errors is None assert not stream.isatty() with pytest.raises(io.UnsupportedOperation): stream.detach() with pytest.raises(io.UnsupportedOperation): next(stream) with pytest.raises(io.UnsupportedOperation): stream.read() with pytest.raises(io.UnsupportedOperation): stream.readline() with pytest.raises(io.UnsupportedOperation): stream.seek(0) with pytest.raises(io.UnsupportedOperation): stream.tell() with pytest.raises(TypeError): stream.write(b'') def test_io_isatty(): session = Session() ctx = zmq.Context() pub = ctx.socket(zmq.PUB) thread = IOPubThread(pub) thread.start() stream = OutStream(session, thread, 'stdout', isatty=True) assert stream.isatty() ipykernel-6.7.0/ipykernel/tests/test_jsonutil.py000066400000000000000000000066371417004153500221660ustar00rootroot00000000000000"""Test suite for our JSON utilities.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from binascii import a2b_base64 import json from datetime import datetime import numbers import pytest from jupyter_client._version import version_info as jupyter_client_version from .. import jsonutil from ..jsonutil import json_clean, encode_images JUPYTER_CLIENT_MAJOR_VERSION = jupyter_client_version[0] class MyInt: def __int__(self): return 389 numbers.Integral.register(MyInt) class MyFloat: def __float__(self): return 3.14 numbers.Real.register(MyFloat) @pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op") def test(): # list of input/expected output. Use None for the expected output if it # can be the same as the input. pairs = [(1, None), # start with scalars (1.0, None), ('a', None), (True, None), (False, None), (None, None), # Containers ([1, 2], None), ((1, 2), [1, 2]), ({1, 2}, [1, 2]), (dict(x=1), None), ({'x': 1, 'y':[1,2,3], '1':'int'}, None), # More exotic objects ((x for x in range(3)), [0, 1, 2]), (iter([1, 2]), [1, 2]), (datetime(1991, 7, 3, 12, 00), "1991-07-03T12:00:00.000000"), (MyFloat(), 3.14), (MyInt(), 389) ] for val, jval in pairs: if jval is None: jval = val out = json_clean(val) # validate our cleanup assert out == jval # and ensure that what we return, indeed encodes cleanly json.loads(json.dumps(out)) @pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op") def test_encode_images(): # invalid data, but the header and footer are from real files pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82' jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9' pdfdata = b'%PDF-1.\ntrailer<>]>>>>>>' bindata = b'\xff\xff\xff\xff' fmt = { 'image/png' : pngdata, 'image/jpeg' : jpegdata, 'application/pdf' : pdfdata, 'application/unrecognized': bindata, } encoded = json_clean(encode_images(fmt)) for key, value in fmt.items(): # encoded has unicode, want bytes decoded = a2b_base64(encoded[key]) assert decoded == value encoded2 = json_clean(encode_images(encoded)) assert encoded == encoded2 for key, value in fmt.items(): decoded = a2b_base64(encoded[key]) assert decoded == value @pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op") def test_lambda(): with pytest.raises(ValueError): json_clean(lambda : 1) @pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op") def test_exception(): bad_dicts = [{1:'number', '1':'string'}, {True:'bool', 'True':'string'}, ] for d in bad_dicts: with pytest.raises(ValueError): json_clean(d) 
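# Illustrative sketch (not one of the original tests): the container
# normalization json_clean applies under jupyter_client < 7 -- with 7+ it
# is a no-op, as the skip markers in this file note. The function name is
# hypothetical and never called by the suite.
def _example_json_clean():
    from ipykernel.jsonutil import json_clean
    assert json_clean((1, 2)) == [1, 2]      # tuples become lists
    assert json_clean({'x': 1}) == {'x': 1}  # clean dicts pass through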
@pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op") def test_unicode_dict(): data = {'üniço∂e': 'üniço∂e'} clean = jsonutil.json_clean(data) assert data == clean ipykernel-6.7.0/ipykernel/tests/test_kernel.py000066400000000000000000000404721417004153500215720ustar00rootroot00000000000000"""test the IPython Kernel""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import ast import os.path import platform import subprocess import sys import time from tempfile import TemporaryDirectory from flaky import flaky import pytest from packaging import version import IPython from IPython.paths import locate_profile from .utils import ( new_kernel, kernel, TIMEOUT, assemble_output, execute, flush_channels, wait_for_idle, get_reply, ) def _check_master(kc, expected=True, stream="stdout"): execute(kc=kc, code="import sys") flush_channels(kc) msg_id, content = execute(kc=kc, code="print(sys.%s._is_master_process())" % stream) stdout, stderr = assemble_output(kc.get_iopub_msg) assert stdout.strip() == repr(expected) def _check_status(content): """If status=error, show the traceback""" if content['status'] == 'error': assert False, ''.join(['\n'] + content['traceback']) # printing tests def test_simple_print(): """simple print statement in kernel""" with kernel() as kc: msg_id, content = execute(kc=kc, code="print('hi')") stdout, stderr = assemble_output(kc.get_iopub_msg) assert stdout == 'hi\n' assert stderr == '' _check_master(kc, expected=True) @pytest.mark.skip( reason="Currently don't capture during test as pytest does its own capturing" ) def test_capture_fd(): """simple print statement in kernel""" with kernel() as kc: iopub = kc.iopub_channel msg_id, content = execute(kc=kc, code="import os; os.system('echo capsys')") stdout, stderr = assemble_output(iopub) assert stdout == "capsys\n" assert stderr == "" _check_master(kc, expected=True) @pytest.mark.skip( reason="Currently don't capture during test as pytest does its own capturing" ) def test_subprocess_peek_at_stream_fileno(): """""" with kernel() as kc: iopub = kc.iopub_channel msg_id, content = execute( kc=kc, code="import subprocess, sys; subprocess.run(['python', '-c', 'import os; os.system(\"echo CAP1\"); print(\"CAP2\")'], stderr=sys.stderr)", ) stdout, stderr = assemble_output(iopub) assert stdout == "CAP1\nCAP2\n" assert stderr == "" _check_master(kc, expected=True) def test_sys_path(): """test that sys.path doesn't get messed up by default""" with kernel() as kc: msg_id, content = execute(kc=kc, code="import sys; print(repr(sys.path))") stdout, stderr = assemble_output(kc.get_iopub_msg) # for error-output on failure sys.stderr.write(stderr) sys_path = ast.literal_eval(stdout.strip()) assert '' in sys_path def test_sys_path_profile_dir(): """test that sys.path doesn't get messed up when `--profile-dir` is specified""" with new_kernel(['--profile-dir', locate_profile('default')]) as kc: msg_id, content = execute(kc=kc, code="import sys; print(repr(sys.path))") stdout, stderr = assemble_output(kc.get_iopub_msg) # for error-output on failure sys.stderr.write(stderr) sys_path = ast.literal_eval(stdout.strip()) assert '' in sys_path @flaky(max_runs=3) @pytest.mark.skipif( sys.platform == "win32" or (sys.platform == "darwin" and sys.version_info >= (3, 8)), reason="subprocess prints fail on Windows and MacOS Python 3.8+", ) def test_subprocess_print(): """printing from forked mp.Process""" with new_kernel() as kc: _check_master(kc, expected=True) 
flush_channels(kc) np = 5 code = '\n'.join([ "import time", "import multiprocessing as mp", "pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % np, "for p in pool: p.start()", "for p in pool: p.join()", "time.sleep(0.5)," ]) msg_id, content = execute(kc=kc, code=code) stdout, stderr = assemble_output(kc.get_iopub_msg) assert stdout.count("hello") == np, stdout for n in range(np): assert stdout.count(str(n)) == 1, stdout assert stderr == "" _check_master(kc, expected=True) _check_master(kc, expected=True, stream="stderr") @flaky(max_runs=3) def test_subprocess_noprint(): """mp.Process without print doesn't trigger iostream mp_mode""" with kernel() as kc: np = 5 code = '\n'.join([ "import multiprocessing as mp", "pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % np, "for p in pool: p.start()", "for p in pool: p.join()" ]) msg_id, content = execute(kc=kc, code=code) stdout, stderr = assemble_output(kc.get_iopub_msg) assert stdout == '' assert stderr == '' _check_master(kc, expected=True) _check_master(kc, expected=True, stream="stderr") @flaky(max_runs=3) @pytest.mark.skipif( sys.platform == "win32" or (sys.platform == "darwin" and sys.version_info >= (3, 8)), reason="subprocess prints fail on Windows and MacOS Python 3.8+", ) def test_subprocess_error(): """error in mp.Process doesn't crash""" with new_kernel() as kc: code = '\n'.join([ "import multiprocessing as mp", "p = mp.Process(target=int, args=('hi',))", "p.start()", "p.join()", ]) msg_id, content = execute(kc=kc, code=code) stdout, stderr = assemble_output(kc.get_iopub_msg) assert stdout == '' assert "ValueError" in stderr _check_master(kc, expected=True) _check_master(kc, expected=True, stream="stderr") # raw_input tests def test_raw_input(): """test input""" with kernel() as kc: iopub = kc.iopub_channel input_f = "input" theprompt = "prompt> " code = 'print({input_f}("{theprompt}"))'.format(**locals()) msg_id = kc.execute(code, allow_stdin=True) msg = kc.get_stdin_msg(timeout=TIMEOUT) assert msg['header']['msg_type'] == 'input_request' content = msg['content'] assert content['prompt'] == theprompt text = "some text" kc.input(text) reply = kc.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'ok' stdout, stderr = assemble_output(kc.get_iopub_msg) assert stdout == text + "\n" def test_save_history(): # Saving history from the kernel with %hist -f was failing because of # unicode problems on Python 2. with kernel() as kc, TemporaryDirectory() as td: file = os.path.join(td, 'hist.out') execute('a=1', kc=kc) wait_for_idle(kc) execute('b="abcþ"', kc=kc) wait_for_idle(kc) _, reply = execute("%hist -f " + file, kc=kc) assert reply['status'] == 'ok' with open(file, encoding='utf-8') as f: content = f.read() assert 'a=1' in content assert 'b="abcþ"' in content def test_smoke_faulthandler(): faulthadler = pytest.importorskip('faulthandler', reason='this test needs faulthandler') with kernel() as kc: # Note: faulthandler.register is not available on windows. 
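        # (Added note: under the kernel, sys.stderr is a ZMQ OutStream without
        # a real file descriptor, so this smoke test checks that enabling and
        # registering faulthandler still succeeds in that environment.)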
code = '\n'.join([ 'import sys', 'import faulthandler', 'import signal', 'faulthandler.enable()', 'if not sys.platform.startswith("win32"):', ' faulthandler.register(signal.SIGTERM)']) _, reply = execute(code, kc=kc) assert reply["status"] == "ok", reply.get("traceback", "") def test_help_output(): """ipython kernel --help-all works""" cmd = [sys.executable, "-m", "IPython", "kernel", "--help-all"] proc = subprocess.run(cmd, timeout=30, capture_output=True) assert proc.returncode == 0, proc.stderr assert b"Traceback" not in proc.stderr assert b"Options" in proc.stdout assert b"Class" in proc.stdout def test_is_complete(): with kernel() as kc: # There are more test cases for this in core - here we just check # that the kernel exposes the interface correctly. kc.is_complete('2+2') reply = kc.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'complete' # SyntaxError kc.is_complete('raise = 2') reply = kc.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'invalid' kc.is_complete('a = [1,\n2,') reply = kc.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'incomplete' assert reply['content']['indent'] == '' # Cell magic ends on two blank lines for console UIs kc.is_complete('%%timeit\na\n\n') reply = kc.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'complete' @pytest.mark.skipif(sys.platform != "win32", reason="only run on Windows") def test_complete(): with kernel() as kc: execute('a = 1', kc=kc) wait_for_idle(kc) cell = 'import IPython\nb = a.' kc.complete(cell) reply = kc.get_shell_msg(timeout=TIMEOUT) c = reply['content'] assert c['status'] == 'ok' start = cell.find('a.') end = start + 2 assert c['cursor_end'] == cell.find('a.') + 2 assert c['cursor_start'] <= end # there are many right answers for cursor_start, # so verify application of the completion # rather than the value of cursor_start matches = c['matches'] assert matches for m in matches: completed = cell[:c['cursor_start']] + m assert completed.startswith(cell) def test_matplotlib_inline_on_import(): pytest.importorskip('matplotlib', reason='this test requires matplotlib') with kernel() as kc: cell = '\n'.join([ 'import matplotlib, matplotlib.pyplot as plt', 'backend = matplotlib.get_backend()' ]) _, reply = execute(cell, user_expressions={'backend': 'backend'}, kc=kc) _check_status(reply) backend_bundle = reply['user_expressions']['backend'] _check_status(backend_bundle) assert 'backend_inline' in backend_bundle['data']['text/plain'] def test_message_order(): N = 100 # number of messages to test with kernel() as kc: _, reply = execute("a = 1", kc=kc) _check_status(reply) offset = reply['execution_count'] + 1 cell = "a += 1\na" msg_ids = [] # submit N executions as fast as we can for i in range(N): msg_ids.append(kc.execute(cell)) # check message-handling order for i, msg_id in enumerate(msg_ids, offset): reply = kc.get_shell_msg(timeout=TIMEOUT) _check_status(reply['content']) assert reply['content']['execution_count'] == i assert reply['parent_header']['msg_id'] == msg_id @pytest.mark.skipif( sys.platform.startswith("linux") or sys.platform.startswith("darwin"), reason="test only on windows", ) def test_unc_paths(): with kernel() as kc, TemporaryDirectory() as td: drive_file_path = os.path.join(td, 'unc.txt') with open(drive_file_path, 'w+') as f: f.write('# UNC test') unc_root = '\\\\localhost\\C$' file_path = os.path.splitdrive(os.path.dirname(drive_file_path))[1] unc_file_path = os.path.join(unc_root, file_path[1:]) kc.execute(f"cd {unc_file_path:s}") 
reply = kc.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'ok' out, err = assemble_output(kc.get_iopub_msg) assert unc_file_path in out flush_channels(kc) kc.execute(code="ls") reply = kc.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'ok' out, err = assemble_output(kc.get_iopub_msg) assert 'unc.txt' in out kc.execute(code="cd") reply = kc.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'ok' @pytest.mark.skipif( platform.python_implementation() == "PyPy", reason="does not work on PyPy", ) def test_shutdown(): """Kernel exits after polite shutdown_request""" with new_kernel() as kc: km = kc.parent execute('a = 1', kc=kc) wait_for_idle(kc) kc.shutdown() for i in range(300): # 30s timeout if km.is_alive(): time.sleep(.1) else: break assert not km.is_alive() def test_interrupt_during_input(): """ The kernel exits after being interrupted while waiting in input(). input() appears to have issues other functions don't, and it needs to be interruptible in order for pdb to be interruptible. """ with new_kernel() as kc: km = kc.parent msg_id = kc.execute("input()") time.sleep(1) # Make sure it's actually waiting for input. km.interrupt_kernel() from .test_message_spec import validate_message # If we failed to interrupt interrupt, this will timeout: reply = get_reply(kc, msg_id, TIMEOUT) validate_message(reply, 'execute_reply', msg_id) @pytest.mark.skipif( os.name == "nt", reason="Message based interrupt not supported on Windows" ) def test_interrupt_with_message(): """ """ with new_kernel() as kc: km = kc.parent km.kernel_spec.interrupt_mode = "message" msg_id = kc.execute("input()") time.sleep(1) # Make sure it's actually waiting for input. km.interrupt_kernel() from .test_message_spec import validate_message # If we failed to interrupt interrupt, this will timeout: reply = get_reply(kc, msg_id, TIMEOUT) validate_message(reply, 'execute_reply', msg_id) @pytest.mark.skipif( "__pypy__" in sys.builtin_module_names, reason="fails on pypy", ) def test_interrupt_during_pdb_set_trace(): """ The kernel exits after being interrupted while waiting in pdb.set_trace(). Merely testing input() isn't enough, pdb has its own issues that need to be handled in addition. This test will fail with versions of IPython < 7.14.0. """ with new_kernel() as kc: km = kc.parent msg_id = kc.execute("import pdb; pdb.set_trace()") msg_id2 = kc.execute("3 + 4") time.sleep(1) # Make sure it's actually waiting for input. 
km.interrupt_kernel() from .test_message_spec import validate_message # If we failed to interrupt interrupt, this will timeout: reply = get_reply(kc, msg_id, TIMEOUT) validate_message(reply, 'execute_reply', msg_id) # If we failed to interrupt interrupt, this will timeout: reply = get_reply(kc, msg_id2, TIMEOUT) validate_message(reply, 'execute_reply', msg_id2) def test_control_thread_priority(): N = 5 with new_kernel() as kc: msg_id = kc.execute("pass") get_reply(kc, msg_id) sleep_msg_id = kc.execute("import asyncio; await asyncio.sleep(2)") # submit N shell messages shell_msg_ids = [] for i in range(N): shell_msg_ids.append(kc.execute(f"i = {i}")) # ensure all shell messages have arrived at the kernel before any control messages time.sleep(0.5) # at this point, shell messages should be waiting in msg_queue, # rather than zmq while the kernel is still in the middle of processing # the first execution # now send N control messages control_msg_ids = [] for i in range(N): msg = kc.session.msg("kernel_info_request", {}) kc.control_channel.send(msg) control_msg_ids.append(msg["header"]["msg_id"]) # finally, collect the replies on both channels for comparison get_reply(kc, sleep_msg_id) shell_replies = [] for msg_id in shell_msg_ids: shell_replies.append(get_reply(kc, msg_id)) control_replies = [] for msg_id in control_msg_ids: control_replies.append(get_reply(kc, msg_id, channel="control")) # verify that all control messages were handled before all shell messages shell_dates = [msg["header"]["date"] for msg in shell_replies] control_dates = [msg["header"]["date"] for msg in control_replies] # comparing first to last ought to be enough, since queues preserve order # use <= in case of very-fast handling and/or low resolution timers assert control_dates[-1] <= shell_dates[0] ipykernel-6.7.0/ipykernel/tests/test_kernelspec.py000066400000000000000000000101741417004153500224410ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
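# Illustrative sketch (not one of the original tests): the end-to-end
# install flow that the helpers below verify piece by piece. The function
# name is hypothetical and nothing in the suite calls it.
def _example_install_flow():
    from ipykernel.kernelspec import write_kernel_spec, install
    staged = write_kernel_spec()    # temp dir containing kernel.json + logos
    dest = install(user=True, display_name='Python (demo)')
    return staged, dest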
import json import os import shutil import sys import tempfile from unittest import mock from jupyter_core.paths import jupyter_data_dir from ipykernel.kernelspec import ( make_ipkernel_cmd, get_kernel_dict, write_kernel_spec, install, InstallIPythonKernelSpecApp, KERNEL_NAME, RESOURCES, ) import pytest pjoin = os.path.join def test_make_ipkernel_cmd(): cmd = make_ipkernel_cmd() assert cmd == [ sys.executable, '-m', 'ipykernel_launcher', '-f', '{connection_file}' ] def assert_kernel_dict(d): assert d['argv'] == make_ipkernel_cmd() assert d['display_name'] == 'Python %i (ipykernel)' % sys.version_info[0] assert d['language'] == 'python' def test_get_kernel_dict(): d = get_kernel_dict() assert_kernel_dict(d) def assert_kernel_dict_with_profile(d): assert d["argv"] == make_ipkernel_cmd(extra_arguments=["--profile", "test"]) assert d["display_name"] == "Python %i (ipykernel)" % sys.version_info[0] assert d["language"] == "python" def test_get_kernel_dict_with_profile(): d = get_kernel_dict(["--profile", "test"]) assert_kernel_dict_with_profile(d) def assert_is_spec(path): for fname in os.listdir(RESOURCES): dst = pjoin(path, fname) assert os.path.exists(dst) kernel_json = pjoin(path, 'kernel.json') assert os.path.exists(kernel_json) with open(kernel_json, encoding='utf8') as f: json.load(f) def test_write_kernel_spec(): path = write_kernel_spec() assert_is_spec(path) shutil.rmtree(path) def test_write_kernel_spec_path(): path = os.path.join(tempfile.mkdtemp(), KERNEL_NAME) path2 = write_kernel_spec(path) assert path == path2 assert_is_spec(path) shutil.rmtree(path) def test_install_kernelspec(): path = tempfile.mkdtemp() try: InstallIPythonKernelSpecApp.launch_instance(argv=["--prefix", path]) assert_is_spec(os.path.join(path, "share", "jupyter", "kernels", KERNEL_NAME)) finally: shutil.rmtree(path) def test_install_user(): tmp = tempfile.mkdtemp() with mock.patch.dict(os.environ, {'HOME': tmp}): install(user=True) data_dir = jupyter_data_dir() assert_is_spec(os.path.join(data_dir, 'kernels', KERNEL_NAME)) def test_install(): system_jupyter_dir = tempfile.mkdtemp() with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH', [system_jupyter_dir]): install() assert_is_spec(os.path.join(system_jupyter_dir, 'kernels', KERNEL_NAME)) def test_install_profile(): system_jupyter_dir = tempfile.mkdtemp() with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH', [system_jupyter_dir]): install(profile="Test") spec = os.path.join(system_jupyter_dir, 'kernels', KERNEL_NAME, "kernel.json") with open(spec) as f: spec = json.load(f) assert spec["display_name"].endswith(" [profile=Test]") assert spec["argv"][-2:] == ["--profile", "Test"] def test_install_display_name_overrides_profile(): system_jupyter_dir = tempfile.mkdtemp() with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH', [system_jupyter_dir]): install(display_name="Display", profile="Test") spec = os.path.join(system_jupyter_dir, 'kernels', KERNEL_NAME, "kernel.json") with open(spec) as f: spec = json.load(f) assert spec["display_name"] == "Display" @pytest.mark.parametrize("env", [ None, dict(spam="spam"), dict(spam="spam", foo='bar') ]) def test_install_env(tmp_path, env): # python 3.5 // tmp_path must be converted to str with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH', [str(tmp_path)]): install(env=env) spec = tmp_path / 'kernels' / KERNEL_NAME / "kernel.json" with spec.open() as f: spec = json.load(f) if env: assert len(env) == len(spec['env']) for k, v in env.items(): assert spec['env'][k] == v else: assert 
'env' not in spec ipykernel-6.7.0/ipykernel/tests/test_message_spec.py000066400000000000000000000366471417004153500227610ustar00rootroot00000000000000"""Test suite for our zeromq-based message specification.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import re import sys from distutils.version import LooseVersion as V from queue import Empty import pytest import jupyter_client from traitlets import ( HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum ) from .utils import (TIMEOUT, start_global_kernel, flush_channels, execute, get_reply, ) #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- KC = None def setup(): global KC KC = start_global_kernel() #----------------------------------------------------------------------------- # Message Spec References #----------------------------------------------------------------------------- class Reference(HasTraits): """ Base class for message spec specification testing. This class is the core of the message specification test. The idea is that child classes implement trait attributes for each message keys, so that message keys can be tested against these traits using :meth:`check` method. """ def check(self, d): """validate a dict against our traits""" for key in self.trait_names(): assert key in d # FIXME: always allow None, probably not a good idea if d[key] is None: continue try: setattr(self, key, d[key]) except TraitError as e: assert False, str(e) class Version(Unicode): def __init__(self, *args, **kwargs): self.min = kwargs.pop('min', None) self.max = kwargs.pop('max', None) kwargs['default_value'] = self.min super().__init__(*args, **kwargs) def validate(self, obj, value): if self.min and V(value) < V(self.min): raise TraitError("bad version: %s < %s" % (value, self.min)) if self.max and (V(value) > V(self.max)): raise TraitError("bad version: %s > %s" % (value, self.max)) class RMessage(Reference): msg_id = Unicode() msg_type = Unicode() header = Dict() parent_header = Dict() content = Dict() def check(self, d): super().check(d) RHeader().check(self.header) if self.parent_header: RHeader().check(self.parent_header) class RHeader(Reference): msg_id = Unicode() msg_type = Unicode() session = Unicode() username = Unicode() version = Version(min='5.0') mime_pat = re.compile(r'^[\w\-\+\.]+/[\w\-\+\.]+$') class MimeBundle(Reference): metadata = Dict() data = Dict() def _data_changed(self, name, old, new): for k,v in new.items(): assert mime_pat.match(k) assert isinstance(v, str) # shell replies class Reply(Reference): status = Enum(('ok', 'error'), default_value='ok') class ExecuteReply(Reply): execution_count = Integer() def check(self, d): Reference.check(self, d) if d['status'] == 'ok': ExecuteReplyOkay().check(d) elif d['status'] == 'error': ExecuteReplyError().check(d) elif d['status'] == 'aborted': ExecuteReplyAborted().check(d) class ExecuteReplyOkay(Reply): status = Enum(('ok',)) user_expressions = Dict() class ExecuteReplyError(Reply): status = Enum(('error',)) ename = Unicode() evalue = Unicode() traceback = List(Unicode()) class ExecuteReplyAborted(Reply): status = Enum(('aborted',)) class InspectReply(Reply, MimeBundle): found = Bool() class ArgSpec(Reference): args = List(Unicode()) varargs = Unicode() varkw = Unicode() defaults = List() class Status(Reference): execution_state = Enum(('busy', 'idle', 'starting'), default_value='busy') class 
CompleteReply(Reply): matches = List(Unicode()) cursor_start = Integer() cursor_end = Integer() status = Unicode() class LanguageInfo(Reference): name = Unicode('python') version = Unicode(sys.version.split()[0]) class KernelInfoReply(Reply): protocol_version = Version(min='5.0') implementation = Unicode('ipython') implementation_version = Version(min='2.1') language_info = Dict() banner = Unicode() def check(self, d): Reference.check(self, d) LanguageInfo().check(d['language_info']) class ConnectReply(Reference): shell_port = Integer() control_port = Integer() stdin_port = Integer() iopub_port = Integer() hb_port = Integer() class CommInfoReply(Reply): comms = Dict() class IsCompleteReply(Reference): status = Enum(('complete', 'incomplete', 'invalid', 'unknown'), default_value='complete') def check(self, d): Reference.check(self, d) if d['status'] == 'incomplete': IsCompleteReplyIncomplete().check(d) class IsCompleteReplyIncomplete(Reference): indent = Unicode() # IOPub messages class ExecuteInput(Reference): code = Unicode() execution_count = Integer() class Error(ExecuteReplyError): """Errors are the same as ExecuteReply, but without status""" status = None # no status field class Stream(Reference): name = Enum(('stdout', 'stderr'), default_value='stdout') text = Unicode() class DisplayData(MimeBundle): pass class ExecuteResult(MimeBundle): execution_count = Integer() class HistoryReply(Reply): history = List(List()) references = { 'execute_reply' : ExecuteReply(), 'inspect_reply' : InspectReply(), 'status' : Status(), 'complete_reply' : CompleteReply(), 'kernel_info_reply': KernelInfoReply(), 'connect_reply': ConnectReply(), 'comm_info_reply': CommInfoReply(), 'is_complete_reply': IsCompleteReply(), 'execute_input' : ExecuteInput(), 'execute_result' : ExecuteResult(), 'history_reply' : HistoryReply(), 'error' : Error(), 'stream' : Stream(), 'display_data' : DisplayData(), 'header' : RHeader(), } """ Specifications of `content` part of the reply messages. """ def validate_message(msg, msg_type=None, parent=None): """validate a message This is a generator, and must be iterated through to actually trigger each test. If msg_type and/or parent are given, the msg_type and/or parent msg_id are compared with the given values. 
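    Illustrative usage, following the pattern used throughout this module::

        reply = get_reply(KC, msg_id, TIMEOUT)
        validate_message(reply, 'execute_reply', msg_id)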
""" RMessage().check(msg) if msg_type: assert msg['msg_type'] == msg_type if parent: assert msg['parent_header']['msg_id'] == parent content = msg['content'] ref = references[msg['msg_type']] ref.check(content) #----------------------------------------------------------------------------- # Tests #----------------------------------------------------------------------------- # Shell channel def test_execute(): flush_channels() msg_id = KC.execute(code='x=1') reply = get_reply(KC, msg_id, TIMEOUT) validate_message(reply, 'execute_reply', msg_id) def test_execute_silent(): flush_channels() msg_id, reply = execute(code='x=1', silent=True) # flush status=idle status = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(status, 'status', msg_id) assert status['content']['execution_state'] == 'idle' with pytest.raises(Empty): KC.get_iopub_msg(timeout=0.1) count = reply['execution_count'] msg_id, reply = execute(code='x=2', silent=True) # flush status=idle status = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(status, 'status', msg_id) assert status['content']['execution_state'] == 'idle' with pytest.raises(Empty): KC.get_iopub_msg(timeout=0.1) count_2 = reply['execution_count'] assert count_2 == count def test_execute_error(): flush_channels() msg_id, reply = execute(code='1/0') assert reply['status'] == 'error' assert reply['ename'] == 'ZeroDivisionError' error = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(error, 'error', msg_id) def test_execute_inc(): """execute request should increment execution_count""" flush_channels() _, reply = execute(code="x=1") count = reply["execution_count"] flush_channels() _, reply = execute(code="x=2") count_2 = reply["execution_count"] assert count_2 == count + 1 def test_execute_stop_on_error(): """execute request should not abort execution queue with stop_on_error False""" flush_channels() fail = '\n'.join([ # sleep to ensure subsequent message is waiting in the queue to be aborted 'import time', 'time.sleep(0.5)', 'raise ValueError', ]) KC.execute(code=fail) KC.execute(code='print("Hello")') KC.get_shell_msg(timeout=TIMEOUT) reply = KC.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'aborted' flush_channels() KC.execute(code=fail, stop_on_error=False) KC.execute(code='print("Hello")') KC.get_shell_msg(timeout=TIMEOUT) reply = KC.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'ok' def test_non_execute_stop_on_error(): """test that non-execute_request's are not aborted after an error""" flush_channels() fail = '\n'.join([ # sleep to ensure subsequent message is waiting in the queue to be aborted 'import time', 'time.sleep(0.5)', 'raise ValueError', ]) KC.execute(code=fail) KC.kernel_info() KC.comm_info() KC.inspect(code="print") reply = KC.get_shell_msg(timeout=TIMEOUT) # execute assert reply['content']['status'] == 'error' reply = KC.get_shell_msg(timeout=TIMEOUT) # kernel_info assert reply['content']['status'] == 'ok' reply = KC.get_shell_msg(timeout=TIMEOUT) # comm_info assert reply['content']['status'] == 'ok' reply = KC.get_shell_msg(timeout=TIMEOUT) # inspect assert reply['content']['status'] == 'ok' def test_user_expressions(): flush_channels() msg_id, reply = execute(code='x=1', user_expressions=dict(foo='x+1')) user_expressions = reply['user_expressions'] assert user_expressions == {'foo': { 'status': 'ok', 'data': {'text/plain': '2'}, 'metadata': {}, }} def test_user_expressions_fail(): flush_channels() msg_id, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname')) user_expressions = 
reply['user_expressions']
    foo = user_expressions['foo']
    assert foo['status'] == 'error'
    assert foo['ename'] == 'NameError'


def test_oinfo():
    flush_channels()

    msg_id = KC.inspect('a')
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'inspect_reply', msg_id)


def test_oinfo_found():
    flush_channels()

    msg_id, reply = execute(code='a=5')

    msg_id = KC.inspect('a')
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'inspect_reply', msg_id)
    content = reply['content']
    assert content['found']
    text = content['data']['text/plain']
    assert 'Type:' in text
    assert 'Docstring:' in text


def test_oinfo_detail():
    flush_channels()

    msg_id, reply = execute(code='ip=get_ipython()')

    msg_id = KC.inspect('ip.object_inspect', cursor_pos=10, detail_level=1)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'inspect_reply', msg_id)
    content = reply['content']
    assert content['found']
    text = content['data']['text/plain']
    assert 'Signature:' in text
    assert 'Source:' in text


def test_oinfo_not_found():
    flush_channels()

    msg_id = KC.inspect('dne')
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'inspect_reply', msg_id)
    content = reply['content']
    assert not content['found']


def test_complete():
    flush_channels()

    msg_id, reply = execute(code="alpha = albert = 5")

    msg_id = KC.complete('al', 2)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'complete_reply', msg_id)
    matches = reply['content']['matches']
    for name in ('alpha', 'albert'):
        assert name in matches


def test_kernel_info_request():
    flush_channels()

    msg_id = KC.kernel_info()
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'kernel_info_reply', msg_id)


def test_connect_request():
    flush_channels()
    msg = KC.session.msg('connect_request')
    KC.shell_channel.send(msg)
    # validate the reply to the connect_request we just sent
    msg_id = msg['header']['msg_id']
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'connect_reply', msg_id)


@pytest.mark.skipif(
    jupyter_client.version_info < (5, 0),
    reason="earlier versions of jupyter_client don't have comm_info",
)
def test_comm_info_request():
    flush_channels()
    msg_id = KC.comm_info()
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'comm_info_reply', msg_id)


def test_single_payload():
    """
    We want to test that set_next_input is not triggered several times per cell.

    This is (was?) mostly due to the fact that `?` in a loop would trigger
    several set_next_input.

    I'm tempted to think that we actually want to _allow_ multiple
    set_next_input (that's the user's choice), but `?` itself (and its
    transform) should avoid setting multiple set_next_input.
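    For reference, a set_next_input payload entry (as produced by
    ZMQInteractiveShell.set_next_input elsewhere in this archive) has the
    shape::

        {'source': 'set_next_input', 'text': 'Hello There', 'replace': False}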
""" flush_channels() msg_id, reply = execute(code="ip = get_ipython()\n" "for i in range(3):\n" " ip.set_next_input('Hello There')\n") payload = reply['payload'] next_input_pls = [pl for pl in payload if pl["source"] == "set_next_input"] assert len(next_input_pls) == 1 def test_is_complete(): flush_channels() msg_id = KC.is_complete("a = 1") reply = get_reply(KC, msg_id, TIMEOUT) validate_message(reply, 'is_complete_reply', msg_id) def test_history_range(): flush_channels() KC.execute(code="x=1", store_history=True) KC.get_shell_msg(timeout=TIMEOUT) msg_id = KC.history(hist_access_type = 'range', raw = True, output = True, start = 1, stop = 2, session = 0) reply = get_reply(KC, msg_id, TIMEOUT) validate_message(reply, 'history_reply', msg_id) content = reply['content'] assert len(content['history']) == 1 def test_history_tail(): flush_channels() KC.execute(code="x=1", store_history=True) KC.get_shell_msg(timeout=TIMEOUT) msg_id = KC.history(hist_access_type = 'tail', raw = True, output = True, n = 1, session = 0) reply = get_reply(KC, msg_id, TIMEOUT) validate_message(reply, 'history_reply', msg_id) content = reply['content'] assert len(content['history']) == 1 def test_history_search(): flush_channels() KC.execute(code="x=1", store_history=True) KC.get_shell_msg(timeout=TIMEOUT) msg_id = KC.history(hist_access_type = 'search', raw = True, output = True, n = 1, pattern = '*', session = 0) reply = get_reply(KC, msg_id, TIMEOUT) validate_message(reply, 'history_reply', msg_id) content = reply['content'] assert len(content['history']) == 1 # IOPub channel def test_stream(): flush_channels() msg_id, reply = execute("print('hi')") stdout = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(stdout, 'stream', msg_id) content = stdout['content'] assert content['text'] == 'hi\n' def test_display_data(): flush_channels() msg_id, reply = execute("from IPython.display import display; display(1)") display = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(display, 'display_data', parent=msg_id) data = display['content']['data'] assert data['text/plain'] == '1' ipykernel-6.7.0/ipykernel/tests/test_pickleutil.py000066400000000000000000000022461417004153500224540ustar00rootroot00000000000000import pickle from ipykernel.pickleutil import can, uncan def interactive(f): f.__module__ = '__main__' return f def dumps(obj): return pickle.dumps(can(obj)) def loads(obj): return uncan(pickle.loads(obj)) def test_no_closure(): @interactive def foo(): a = 5 return a pfoo = dumps(foo) bar = loads(pfoo) assert foo() == bar() def test_generator_closure(): # this only creates a closure on Python 3 @interactive def foo(): i = 'i' r = [ i for j in (1,2) ] return r pfoo = dumps(foo) bar = loads(pfoo) assert foo() == bar() def test_nested_closure(): @interactive def foo(): i = 'i' def g(): return i return g() pfoo = dumps(foo) bar = loads(pfoo) assert foo() == bar() def test_closure(): i = 'i' @interactive def foo(): return i pfoo = dumps(foo) bar = loads(pfoo) assert foo() == bar() def test_uncan_bytes_buffer(): data = b'data' canned = can(data) canned.buffers = [memoryview(buf) for buf in canned.buffers] out = uncan(canned) assert out == data ipykernel-6.7.0/ipykernel/tests/test_start_kernel.py000066400000000000000000000035211417004153500230010ustar00rootroot00000000000000from .test_embed_kernel import setup_kernel from flaky import flaky from textwrap import dedent TIMEOUT = 15 @flaky(max_runs=3) def test_ipython_start_kernel_userns(): cmd = dedent( """ from ipykernel.kernelapp import launch_new_instance ns = {"tre": 
123} launch_new_instance(user_ns=ns) """ ) with setup_kernel(cmd) as client: client.inspect("tre") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert '123' in text # user_module should be an instance of DummyMod client.execute("usermod = get_ipython().user_module") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg["content"] assert content["status"] == "ok" client.inspect("usermod") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert 'DummyMod' in text @flaky(max_runs=3) def test_ipython_start_kernel_no_userns(): # Issue #4188 - user_ns should be passed to shell as None, not {} cmd = dedent( """ from ipykernel.kernelapp import launch_new_instance launch_new_instance() """ ) with setup_kernel(cmd) as client: # user_module should not be an instance of DummyMod client.execute("usermod = get_ipython().user_module") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg["content"] assert content["status"] == "ok" client.inspect("usermod") msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert 'DummyMod' not in text ipykernel-6.7.0/ipykernel/tests/test_zmq_shell.py000066400000000000000000000131511417004153500223020ustar00rootroot00000000000000""" Tests for zmq shell / display publisher. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from queue import Queue from threading import Thread import unittest from traitlets import Int import zmq from ipykernel.zmqshell import ZMQDisplayPublisher from jupyter_client.session import Session class NoReturnDisplayHook: """ A dummy DisplayHook which allows us to monitor the number of times an object is called, but which does *not* return a message when it is called. """ call_count = 0 def __call__(self, obj): self.call_count += 1 class ReturnDisplayHook(NoReturnDisplayHook): """ A dummy DisplayHook with the same counting ability as its base class, but which also returns the same message when it is called. """ def __call__(self, obj): super().__call__(obj) return obj class CounterSession(Session): """ This is a simple subclass to allow us to count the calls made to the session object by the display publisher. """ send_count = Int(0) def send(self, *args, **kwargs): """ A trivial override to just augment the existing call with an increment to the send counter. """ self.send_count += 1 super().send(*args, **kwargs) class ZMQDisplayPublisherTests(unittest.TestCase): """ Tests the ZMQDisplayPublisher in zmqshell.py """ def setUp(self): self.context = zmq.Context() self.socket = self.context.socket(zmq.PUB) self.session = CounterSession() self.disp_pub = ZMQDisplayPublisher( session = self.session, pub_socket = self.socket ) def tearDown(self): """ We need to close the socket in order to proceed with the tests. TODO - There is still an open file handler to '/dev/null', presumably created by zmq. """ self.disp_pub.clear_output() self.socket.close() self.context.term() def test_display_publisher_creation(self): """ Since there's no explicit constructor, here we confirm that keyword args get assigned correctly, and override the defaults. 
""" assert self.disp_pub.session == self.session assert self.disp_pub.pub_socket == self.socket def test_thread_local_hooks(self): """ Confirms that the thread_local attribute is correctly initialised with an empty list for the display hooks """ assert self.disp_pub._hooks == [] def hook(msg): return msg self.disp_pub.register_hook(hook) assert self.disp_pub._hooks == [hook] q = Queue() def set_thread_hooks(): q.put(self.disp_pub._hooks) t = Thread(target=set_thread_hooks) t.start() thread_hooks = q.get(timeout=10) assert thread_hooks == [] def test_publish(self): """ Publish should prepare the message and eventually call `send` by default. """ data = dict(a = 1) assert self.session.send_count == 0 self.disp_pub.publish(data) assert self.session.send_count == 1 def test_display_hook_halts_send(self): """ If a hook is installed, and on calling the object it does *not* return a message, then we assume that the message has been consumed, and should not be processed (`sent`) in the normal manner. """ data = dict(a = 1) hook = NoReturnDisplayHook() self.disp_pub.register_hook(hook) assert hook.call_count == 0 assert self.session.send_count == 0 self.disp_pub.publish(data) assert hook.call_count == 1 assert self.session.send_count == 0 def test_display_hook_return_calls_send(self): """ If a hook is installed and on calling the object it returns a new message, then we assume that this is just a message transformation, and the message should be sent in the usual manner. """ data = dict(a=1) hook = ReturnDisplayHook() self.disp_pub.register_hook(hook) assert hook.call_count == 0 assert self.session.send_count == 0 self.disp_pub.publish(data) assert hook.call_count == 1 assert self.session.send_count == 1 def test_unregister_hook(self): """ Once a hook is unregistered, it should not be called during `publish`. """ data = dict(a = 1) hook = NoReturnDisplayHook() self.disp_pub.register_hook(hook) assert hook.call_count == 0 assert self.session.send_count == 0 self.disp_pub.publish(data) assert hook.call_count == 1 assert self.session.send_count == 0 # # After unregistering the `NoReturn` hook, any calls # to publish should *not* got through the DisplayHook, # but should instead hit the usual `session.send` call # at the end. # # As a result, the hook call count should *not* increase, # but the session send count *should* increase. # first = self.disp_pub.unregister_hook(hook) self.disp_pub.publish(data) self.assertTrue(first) assert hook.call_count == 1 assert self.session.send_count == 1 # # If a hook is not installed, `unregister_hook` # should return false. # second = self.disp_pub.unregister_hook(hook) self.assertFalse(second) if __name__ == '__main__': unittest.main() ipykernel-6.7.0/ipykernel/tests/utils.py000066400000000000000000000130571417004153500204120ustar00rootroot00000000000000"""utilities for testing IPython kernels""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import atexit import os import platform import sys from tempfile import TemporaryDirectory from time import time from contextlib import contextmanager from queue import Empty from subprocess import STDOUT from jupyter_client import manager STARTUP_TIMEOUT = 60 TIMEOUT = 100 KM = None KC = None def start_new_kernel(**kwargs): """start a new kernel, and return its Manager and Client Integrates with our output capturing for tests. 
""" kwargs['stderr'] = STDOUT try: import nose kwargs['stdout'] = nose.iptest_stdstreams_fileno() except (ImportError, AttributeError): pass return manager.start_new_kernel(startup_timeout=STARTUP_TIMEOUT, **kwargs) def flush_channels(kc=None): """flush any messages waiting on the queue""" from .test_message_spec import validate_message if kc is None: kc = KC for get_msg in (kc.get_shell_msg, kc.get_iopub_msg): while True: try: msg = get_msg(timeout=0.1) except Empty: break else: validate_message(msg) def get_reply(kc, msg_id, timeout=TIMEOUT, channel='shell'): t0 = time() while True: get_msg = getattr(kc, f'get_{channel}_msg') reply = get_msg(timeout=timeout) if reply['parent_header']['msg_id'] == msg_id: break # Allow debugging ignored replies print(f"Ignoring reply not to {msg_id}: {reply}") t1 = time() timeout -= t1 - t0 t0 = t1 return reply def execute(code='', kc=None, **kwargs): """wrapper for doing common steps for validating an execution request""" from .test_message_spec import validate_message if kc is None: kc = KC msg_id = kc.execute(code=code, **kwargs) reply = get_reply(kc, msg_id, TIMEOUT) validate_message(reply, 'execute_reply', msg_id) busy = kc.get_iopub_msg(timeout=TIMEOUT) validate_message(busy, 'status', msg_id) assert busy['content']['execution_state'] == 'busy' if not kwargs.get('silent'): execute_input = kc.get_iopub_msg(timeout=TIMEOUT) validate_message(execute_input, 'execute_input', msg_id) assert execute_input['content']['code'] == code # show tracebacks if present for debugging if reply['content'].get('traceback'): print('\n'.join(reply['content']['traceback']), file=sys.stderr) return msg_id, reply['content'] def start_global_kernel(): """start the global kernel (if it isn't running) and return its client""" global KM, KC if KM is None: KM, KC = start_new_kernel() atexit.register(stop_global_kernel) else: flush_channels(KC) return KC @contextmanager def kernel(): """Context manager for the global kernel instance Should be used for most kernel tests Returns ------- kernel_client: connected KernelClient instance """ yield start_global_kernel() def uses_kernel(test_f): """Decorator for tests that use the global kernel""" def wrapped_test(): with kernel() as kc: test_f(kc) wrapped_test.__doc__ = test_f.__doc__ wrapped_test.__name__ = test_f.__name__ return wrapped_test def stop_global_kernel(): """Stop the global shared kernel instance, if it exists""" global KM, KC KC.stop_channels() KC = None if KM is None: return KM.shutdown_kernel(now=True) KM = None def new_kernel(argv=None): """Context manager for a new kernel in a subprocess Should only be used for tests where the kernel must not be re-used. 
Returns ------- kernel_client: connected KernelClient instance """ kwargs = {'stderr': STDOUT} try: import nose kwargs['stdout'] = nose.iptest_stdstreams_fileno() except (ImportError, AttributeError): pass if argv is not None: kwargs['extra_arguments'] = argv return manager.run_kernel(**kwargs) def assemble_output(get_msg): """assemble stdout/err from an execution""" stdout = '' stderr = '' while True: msg = get_msg(timeout=1) msg_type = msg['msg_type'] content = msg['content'] if msg_type == 'status' and content['execution_state'] == 'idle': # idle message signals end of output break elif msg['msg_type'] == 'stream': if content['name'] == 'stdout': stdout += content['text'] elif content['name'] == 'stderr': stderr += content['text'] else: raise KeyError("bad stream: %r" % content['name']) else: # other output, ignored pass return stdout, stderr def wait_for_idle(kc): while True: msg = kc.get_iopub_msg(timeout=1) msg_type = msg['msg_type'] content = msg['content'] if msg_type == 'status' and content['execution_state'] == 'idle': break class TemporaryWorkingDirectory(TemporaryDirectory): """ Creates a temporary directory and sets the cwd to that directory. Automatically reverts to previous cwd upon cleanup. Usage example: with TemporaryWorkingDirectory() as tmpdir: ... """ def __enter__(self): self.old_wd = os.getcwd() os.chdir(self.name) return super().__enter__() def __exit__(self, exc, value, tb): os.chdir(self.old_wd) return super().__exit__(exc, value, tb) ipykernel-6.7.0/ipykernel/trio_runner.py000066400000000000000000000040221417004153500204460ustar00rootroot00000000000000import builtins import logging import signal import threading import traceback import warnings import trio class TrioRunner: def __init__(self): self._cell_cancel_scope = None self._trio_token = None def initialize(self, kernel, io_loop): kernel.shell.set_trio_runner(self) kernel.shell.run_line_magic('autoawait', 'trio') kernel.shell.magics_manager.magics['line']['autoawait'] = \ lambda _: warnings.warn("Autoawait isn't allowed in Trio " "background loop mode.") bg_thread = threading.Thread(target=io_loop.start, daemon=True, name='TornadoBackground') bg_thread.start() def interrupt(self, signum, frame): if self._cell_cancel_scope: self._cell_cancel_scope.cancel() else: raise Exception('Kernel interrupted but no cell is running') def run(self): old_sig = signal.signal(signal.SIGINT, self.interrupt) def log_nursery_exc(exc): exc = '\n'.join(traceback.format_exception(type(exc), exc, exc.__traceback__)) logging.error('An exception occurred in a global nursery task.\n%s', exc) async def trio_main(): self._trio_token = trio.lowlevel.current_trio_token() async with trio.open_nursery() as nursery: # TODO This hack prevents the nursery from cancelling all child # tasks when an uncaught exception occurs, but it's ugly. nursery._add_exc = log_nursery_exc builtins.GLOBAL_NURSERY = nursery await trio.sleep_forever() trio.run(trio_main) signal.signal(signal.SIGINT, old_sig) def __call__(self, async_fn): async def loc(coro): self._cell_cancel_scope = trio.CancelScope() with self._cell_cancel_scope: return await coro self._cell_cancel_scope = None return trio.from_thread.run(loc, async_fn, trio_token=self._trio_token) ipykernel-6.7.0/ipykernel/zmqshell.py000066400000000000000000000550311417004153500177450ustar00rootroot00000000000000"""A ZMQ-based subclass of InteractiveShell. 
This code is meant to ease the refactoring of the base InteractiveShell into something with a cleaner architecture for 2-process use, without actually breaking InteractiveShell itself. So we're doing something a bit ugly, where we subclass and override what we want to fix. Once this is working well, we can go back to the base class and refactor the code for a cleaner inheritance implementation that doesn't rely on so much monkeypatching. But this lets us maintain a fully working IPython as we develop the new machinery. This should thus be thought of as scaffolding. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import os import sys import warnings from threading import local from IPython.core.interactiveshell import ( InteractiveShell, InteractiveShellABC ) from IPython.core import page from IPython.core.autocall import ZMQExitAutocall from IPython.core.displaypub import DisplayPublisher from IPython.core.error import UsageError from IPython.core.magics import MacroToEdit, CodeMagics from IPython.core.magic import magics_class, line_magic, Magics from IPython.core import payloadpage from IPython.core.usage import default_banner from IPython.display import display, Javascript from ipykernel import ( get_connection_file, get_connection_info, connect_qtconsole ) from IPython.utils import openpy from ipykernel.jsonutil import json_clean, encode_images from IPython.utils.process import arg_split, system from traitlets import ( Instance, Type, Dict, CBool, CBytes, Any, default, observe ) from ipykernel.displayhook import ZMQShellDisplayHook from jupyter_core.paths import jupyter_runtime_dir from jupyter_client.session import extract_header, Session #----------------------------------------------------------------------------- # Functions and classes #----------------------------------------------------------------------------- class ZMQDisplayPublisher(DisplayPublisher): """A display publisher that publishes data using a ZeroMQ PUB socket.""" session = Instance(Session, allow_none=True) pub_socket = Any(allow_none=True) parent_header = Dict({}) topic = CBytes(b'display_data') # thread_local: # An attribute used to ensure the correct output message # is processed. See ipykernel Issue 113 for a discussion. _thread_local = Any() def set_parent(self, parent): """Set the parent for outbound messages.""" self.parent_header = extract_header(parent) def _flush_streams(self): """flush IO Streams prior to display""" sys.stdout.flush() sys.stderr.flush() @default('_thread_local') def _default_thread_local(self): """Initialize our thread local storage""" return local() @property def _hooks(self): if not hasattr(self._thread_local, 'hooks'): # create new list for a new thread self._thread_local.hooks = [] return self._thread_local.hooks def publish( self, data, metadata=None, transient=None, update=False, ): """Publish a display-data message Parameters ---------- data : dict A mime-bundle dict, keyed by mime-type. metadata : dict, optional Metadata associated with the data. transient : dict, optional, keyword-only Transient data that may only be relevant during a live display, such as display_id. Transient data should not be persisted to documents. update : bool, optional, keyword-only If True, send an update_display_data message instead of display_data. 
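        Illustrative call (a sketch; ``pub`` stands for a configured
        ZMQDisplayPublisher, constructed as in test_zmq_shell.py)::

            pub.publish({'text/plain': '42'}, metadata={})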
""" self._flush_streams() if metadata is None: metadata = {} if transient is None: transient = {} self._validate_data(data, metadata) content = {} content['data'] = encode_images(data) content['metadata'] = metadata content['transient'] = transient msg_type = 'update_display_data' if update else 'display_data' # Use 2-stage process to send a message, # in order to put it through the transform # hooks before potentially sending. msg = self.session.msg( msg_type, json_clean(content), parent=self.parent_header ) # Each transform either returns a new # message or None. If None is returned, # the message has been 'used' and we return. for hook in self._hooks: msg = hook(msg) if msg is None: return self.session.send( self.pub_socket, msg, ident=self.topic, ) def clear_output(self, wait=False): """Clear output associated with the current execution (cell). Parameters ---------- wait : bool (default: False) If True, the output will not be cleared immediately, instead waiting for the next display before clearing. This reduces bounce during repeated clear & display loops. """ content = dict(wait=wait) self._flush_streams() self.session.send( self.pub_socket, 'clear_output', content, parent=self.parent_header, ident=self.topic, ) def register_hook(self, hook): """ Registers a hook with the thread-local storage. Parameters ---------- hook : Any callable object Returns ------- Either a publishable message, or `None`. The DisplayHook objects must return a message from the __call__ method if they still require the `session.send` method to be called after transformation. Returning `None` will halt that execution path, and session.send will not be called. """ self._hooks.append(hook) def unregister_hook(self, hook): """ Un-registers a hook with the thread-local storage. Parameters ---------- hook : Any callable object which has previously been registered as a hook. Returns ------- bool - `True` if the hook was removed, `False` if it wasn't found. """ try: self._hooks.remove(hook) return True except ValueError: return False @magics_class class KernelMagics(Magics): #------------------------------------------------------------------------ # Magic overrides #------------------------------------------------------------------------ # Once the base class stops inheriting from magic, this code needs to be # moved into a separate machinery as well. For now, at least isolate here # the magics which this class needs to implement differently from the base # class, or that are unique to it. _find_edit_target = CodeMagics._find_edit_target @line_magic def edit(self, parameter_s='', last_call=['','']): """Bring up an editor and execute the resulting code. Usage: %edit [options] [args] %edit runs an external text editor. You will need to set the command for this editor via the ``TerminalInteractiveShell.editor`` option in your configuration file before it will work. This command allows you to conveniently edit multi-line code right in your IPython session. If called without arguments, %edit opens up an empty editor with a temporary file and will execute the contents of this file when you close it (don't forget to save it!). Options: -n Open the editor at a specified line number. By default, the IPython editor hook uses the unix syntax 'editor +N filename', but you can configure this by providing your own modified hook if your favorite editor supports line-number specifications with a different syntax. 
-p Call the editor with the same data as the previous time it was used, regardless of how long ago (in your current session) it was. -r Use 'raw' input. This option only applies to input taken from the user's history. By default, the 'processed' history is used, so that magics are loaded in their transformed version to valid Python. If this option is given, the raw input as typed as the command line is used instead. When you exit the editor, it will be executed by IPython's own processor. Arguments: If arguments are given, the following possibilities exist: - The arguments are numbers or pairs of colon-separated numbers (like 1 4:8 9). These are interpreted as lines of previous input to be loaded into the editor. The syntax is the same of the %macro command. - If the argument doesn't start with a number, it is evaluated as a variable and its contents loaded into the editor. You can thus edit any string which contains python code (including the result of previous edits). - If the argument is the name of an object (other than a string), IPython will try to locate the file where it was defined and open the editor at the point where it is defined. You can use ``%edit function`` to load an editor exactly at the point where 'function' is defined, edit it and have the file be executed automatically. If the object is a macro (see %macro for details), this opens up your specified editor with a temporary file containing the macro's data. Upon exit, the macro is reloaded with the contents of the file. Note: opening at an exact line is only supported under Unix, and some editors (like kedit and gedit up to Gnome 2.8) do not understand the '+NUMBER' parameter necessary for this feature. Good editors like (X)Emacs, vi, jed, pico and joe all do. - If the argument is not found as a variable, IPython will look for a file with that name (adding .py if necessary) and load it into the editor. It will execute its contents with execfile() when you exit, loading any code in the file into your interactive namespace. Unlike in the terminal, this is designed to use a GUI editor, and we do not know when it has closed. So the file you edit will not be automatically executed or printed. Note that %edit is also available through the alias %ed. """ opts,args = self.parse_options(parameter_s, 'prn:') try: filename, lineno, _ = CodeMagics._find_edit_target(self.shell, args, opts, last_call) except MacroToEdit: # TODO: Implement macro editing over 2 processes. print("Macro editing not yet implemented in 2-process model.") return # Make sure we send to the client an absolute path, in case the working # directory of client and kernel don't match filename = os.path.abspath(filename) payload = { 'source' : 'edit_magic', 'filename' : filename, 'line_number' : lineno } self.shell.payload_manager.write_payload(payload) # A few magics that are adapted to the specifics of using pexpect and a # remote terminal @line_magic def clear(self, arg_s): """Clear the terminal.""" if os.name == 'posix': self.shell.system("clear") else: self.shell.system("cls") if os.name == 'nt': # This is the usual name in windows cls = line_magic('cls')(clear) # Terminal pagers won't work over pexpect, but we do have our own pager @line_magic def less(self, arg_s): """Show a file through the pager. 
Files ending in .py are syntax-highlighted.""" if not arg_s: raise UsageError('Missing filename.') if arg_s.endswith('.py'): cont = self.shell.pycolorize(openpy.read_py_file(arg_s, skip_encoding_cookie=False)) else: cont = open(arg_s).read() page.page(cont) more = line_magic('more')(less) # Man calls a pager, so we also need to redefine it if os.name == 'posix': @line_magic def man(self, arg_s): """Find the man page for the given command and display in pager.""" page.page(self.shell.getoutput('man %s | col -b' % arg_s, split=False)) @line_magic def connect_info(self, arg_s): """Print information for connecting other clients to this kernel It will print the contents of this session's connection file, as well as shortcuts for local clients. In the simplest case, when called from the most recently launched kernel, secondary clients can be connected, simply with: $> jupyter --existing """ try: connection_file = get_connection_file() info = get_connection_info(unpack=False) except Exception as e: warnings.warn("Could not get connection info: %r" % e) return # if it's in the default dir, truncate to basename if jupyter_runtime_dir() == os.path.dirname(connection_file): connection_file = os.path.basename(connection_file) print (info + '\n') print ( f"Paste the above JSON into a file, and connect with:\n" f" $> jupyter --existing \n" f"or, if you are local, you can connect with just:\n" f" $> jupyter --existing {connection_file}\n" f"or even just:\n" f" $> jupyter --existing\n" f"if this is the most recent Jupyter kernel you have started." ) @line_magic def qtconsole(self, arg_s): """Open a qtconsole connected to this kernel. Useful for connecting a qtconsole to running notebooks, for better debugging. """ # %qtconsole should imply bind_kernel for engines: # FIXME: move to ipyparallel Kernel subclass if 'ipyparallel' in sys.modules: from ipyparallel import bind_kernel bind_kernel() try: connect_qtconsole(argv=arg_split(arg_s, os.name=='posix')) except Exception as e: warnings.warn("Could not start qtconsole: %r" % e) return @line_magic def autosave(self, arg_s): """Set the autosave interval in the notebook (in seconds). The default value is 120, or two minutes. ``%autosave 0`` will disable autosave. This magic only has an effect when called from the notebook interface. It has no effect when called in a startup file. """ try: interval = int(arg_s) except ValueError as e: raise UsageError("%%autosave requires an integer, got %r" % arg_s) from e # javascript wants milliseconds milliseconds = 1000 * interval display(Javascript("IPython.notebook.set_autosave_interval(%i)" % milliseconds), include=['application/javascript'] ) if interval: print("Autosaving every %i seconds" % interval) else: print("Autosave disabled") class ZMQInteractiveShell(InteractiveShell): """A subclass of InteractiveShell for ZMQ.""" displayhook_class = Type(ZMQShellDisplayHook) display_pub_class = Type(ZMQDisplayPublisher) data_pub_class = Any() kernel = Any() parent_header = Any() @default('banner1') def _default_banner1(self): return default_banner # Override the traitlet in the parent class, because there's no point using # readline for the kernel. Can be removed when the readline code is moved # to the terminal frontend. readline_use = CBool(False) # autoindent has no meaning in a zmqshell, and attempting to enable it # will print a warning in the absence of readline. 
autoindent = CBool(False) exiter = Instance(ZMQExitAutocall) @default('exiter') def _default_exiter(self): return ZMQExitAutocall(self) @observe('exit_now') def _update_exit_now(self, change): """stop eventloop when exit_now fires""" if change['new']: if hasattr(self.kernel, 'io_loop'): loop = self.kernel.io_loop loop.call_later(0.1, loop.stop) if self.kernel.eventloop: exit_hook = getattr(self.kernel.eventloop, 'exit_hook', None) if exit_hook: exit_hook(self.kernel) keepkernel_on_exit = None # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no # interactive input being read; we provide event loop support in ipkernel def enable_gui(self, gui): from .eventloops import enable_gui as real_enable_gui try: real_enable_gui(gui) self.active_eventloop = gui except ValueError as e: raise UsageError("%s" % e) from e def init_environment(self): """Configure the user's environment.""" env = os.environ # These two ensure 'ls' produces nice coloring on BSD-derived systems env['TERM'] = 'xterm-color' env['CLICOLOR'] = '1' # Since normal pagers don't work at all (over pexpect we don't have # single-key control of the subprocess), try to disable paging in # subprocesses as much as possible. env['PAGER'] = 'cat' env['GIT_PAGER'] = 'cat' def init_hooks(self): super().init_hooks() self.set_hook('show_in_pager', page.as_hook(payloadpage.page), 99) def init_data_pub(self): """Delay datapub init until request, for deprecation warnings""" pass @property def data_pub(self): if not hasattr(self, '_data_pub'): warnings.warn("InteractiveShell.data_pub is deprecated outside IPython parallel.", DeprecationWarning, stacklevel=2) self._data_pub = self.data_pub_class(parent=self) self._data_pub.session = self.display_pub.session self._data_pub.pub_socket = self.display_pub.pub_socket return self._data_pub @data_pub.setter def data_pub(self, pub): self._data_pub = pub def ask_exit(self): """Engage the exit actions.""" self.exit_now = (not self.keepkernel_on_exit) payload = dict( source='ask_exit', keepkernel=self.keepkernel_on_exit, ) self.payload_manager.write_payload(payload) def run_cell(self, *args, **kwargs): self._last_traceback = None return super().run_cell(*args, **kwargs) def _showtraceback(self, etype, evalue, stb): # try to preserve ordering of tracebacks and print statements sys.stdout.flush() sys.stderr.flush() exc_content = { 'traceback' : stb, 'ename' : str(etype.__name__), 'evalue' : str(evalue), } dh = self.displayhook # Send exception info over pub socket for other clients than the caller # to pick up topic = None if dh.topic: topic = dh.topic.replace(b'execute_result', b'error') dh.session.send( dh.pub_socket, "error", json_clean(exc_content), dh.parent_header, ident=topic, ) # FIXME - Once we rely on Python 3, the traceback is stored on the # exception object, so we shouldn't need to store it here. 
        self._last_traceback = stb

    def set_next_input(self, text, replace=False):
        """Send the specified text to the frontend to be presented at the next
        input cell."""
        payload = dict(
            source='set_next_input',
            text=text,
            replace=replace,
        )
        self.payload_manager.write_payload(payload)

    def set_parent(self, parent):
        """Set the parent header for associating output with its triggering input"""
        self.parent_header = parent
        self.displayhook.set_parent(parent)
        self.display_pub.set_parent(parent)
        if hasattr(self, '_data_pub'):
            self.data_pub.set_parent(parent)
        try:
            sys.stdout.set_parent(parent)
        except AttributeError:
            pass
        try:
            sys.stderr.set_parent(parent)
        except AttributeError:
            pass

    def get_parent(self):
        return self.parent_header

    def init_magics(self):
        super().init_magics()
        self.register_magics(KernelMagics)
        self.magics_manager.register_alias('ed', 'edit')

    def init_virtualenv(self):
        # Overridden not to do virtualenv detection, because it's probably
        # not appropriate in a kernel. To use a kernel in a virtualenv, install
        # it inside the virtualenv.
        # https://ipython.readthedocs.io/en/latest/install/kernel_install.html
        pass

    def system_piped(self, cmd):
        """Call the given cmd in a subprocess, piping stdout/err

        Parameters
        ----------
        cmd : str
            Command to execute (cannot end in '&', as background processes are
            not supported). Should not be a command that expects input other
            than simple text.
        """
        if cmd.rstrip().endswith('&'):
            # this is *far* from a rigorous test
            # We do not support backgrounding processes because we either use
            # pexpect or pipes to read from. Users can always just call
            # os.system() or use ip.system=ip.system_raw
            # if they really want a background process.
            raise OSError("Background processes not supported.")

        # we explicitly do NOT return the subprocess status code, because
        # a non-None value would trigger :func:`sys.displayhook` calls.
        # Instead, we store the exit_code in user_ns.

        # Also, protect system call from UNC paths on Windows here too
        # as is done in InteractiveShell.system_raw
        if sys.platform == 'win32':
            cmd = self.var_expand(cmd, depth=1)
            from IPython.utils._process_win32 import AvoidUNCPath
            with AvoidUNCPath() as path:
                if path is not None:
                    cmd = 'pushd %s &&%s' % (path, cmd)
                self.user_ns['_exit_code'] = system(cmd)
        else:
            self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))

    # Ensure new system_piped implementation is used
    system = system_piped


InteractiveShellABC.register(ZMQInteractiveShell)


ipykernel-6.7.0/ipykernel_launcher.py

"""Entry point for launching an IPython kernel.

This is separate from the ipykernel package so we can avoid doing imports
until after removing the cwd from sys.path.
"""

import sys

if __name__ == '__main__':
    # Remove the CWD from sys.path while we load stuff.
    # This is added back by InteractiveShellApp.init_path()
    if sys.path[0] == '':
        del sys.path[0]

    from ipykernel import kernelapp as app
    app.launch_new_instance()


ipykernel-6.7.0/pyproject.toml

[build-system]
build-backend = "setuptools.build_meta"
requires = [
    "setuptools",
    "wheel",
    "debugpy",
    "ipython>=5",
    "jupyter_core>=4.2",
    "jupyter_client",
]

[tool.check-manifest]
ignore = []

[tool.jupyter-releaser]
skip = ["check-links"]

[tool.tbump.version]
current = "6.7.0"
regex = '''
(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)
((?P<channel>a|b|rc|.dev)(?P<release>\d+))?
''' [tool.tbump.git] message_template = "Bump to {new_version}" tag_template = "v{new_version}" [[tool.tbump.file]] src = "ipykernel/_version.py" ipykernel-6.7.0/readthedocs.yml000066400000000000000000000001241417004153500165330ustar00rootroot00000000000000python: version: 3.8 pip_install: true requirements_file: docs/requirements.txt ipykernel-6.7.0/setup.cfg000066400000000000000000000027221417004153500153520ustar00rootroot00000000000000 [bdist_wheel] universal=0 [metadata] license_file = COPYING.md version = attr: ipykernel._version.__version__ [flake8] # References: # https://flake8.readthedocs.io/en/latest/user/configuration.html # https://flake8.readthedocs.io/en/latest/user/error-codes.html # https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes exclude = __init__.py,versioneer.py ignore = E20, # Extra space in brackets E122, # continuation line missing indentation or outdented E124, # closing bracket does not match visual indentation E128,E127,E126 # continuation line over/under-indented for visual indent E121,E125, # continuation line with same indent as next logical line E226, # missing whitespace around arithmetic operator E231,E241, # Multiple spaces around "," E211, # whitespace before '(' E221,E225,E228 # missing whitespace around operator E271, # multiple spaces after keyword E301,E303,E305,E306 # expected X blank lines E26, # Comments E251 # unexpected spaces around keyword / parameter equals E302 # expected 2 blank lines, found 1 E4, # Import formatting E721, # Comparing types instead of isinstance E731, # Assigning lambda expression E741, # Ambiguous variable names W293, # blank line contains whitespace W503, # line break before binary operator W504, # line break after binary operator F811, # redefinition of unused 'loop' from line 10 max-line-length = 120 ipykernel-6.7.0/setup.py000066400000000000000000000067371417004153500152550ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import sys from glob import glob import os import shutil from setuptools import setup from setuptools.command.bdist_egg import bdist_egg # the name of the package name = 'ipykernel' class bdist_egg_disabled(bdist_egg): """Disabled version of bdist_egg Prevents setup.py install from performing setuptools' default easy_install, which it should never ever do. """ def run(self): sys.exit("Aborting implicit building of eggs. 
Use `pip install .` to install from source.") pjoin = os.path.join here = os.path.abspath(os.path.dirname(__file__)) pkg_root = pjoin(here, name) packages = [] for d, _, _ in os.walk(pjoin(here, name)): if os.path.exists(pjoin(d, '__init__.py')): packages.append(d[len(here)+1:].replace(os.path.sep, '.')) package_data = { 'ipykernel': ['resources/*.*'], } with open(pjoin(here, 'README.md')) as fid: LONG_DESCRIPTION = fid.read() setup_args = dict( name=name, cmdclass={ 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled, }, scripts=glob(pjoin('scripts', '*')), packages=packages, py_modules=['ipykernel_launcher'], package_data=package_data, description="IPython Kernel for Jupyter", long_description_content_type="text/markdown", author='IPython Development Team', author_email='ipython-dev@scipy.org', url='https://ipython.org', license='BSD', long_description=LONG_DESCRIPTION, platforms="Linux, Mac OS X, Windows", keywords=['Interactive', 'Interpreter', 'Shell', 'Web'], python_requires='>=3.7', install_requires=[ 'debugpy>=1.0.0,<2.0', 'ipython>=7.23.1', 'traitlets>=5.1.0,<6.0', 'jupyter_client<8.0', 'tornado>=4.2,<7.0', 'matplotlib-inline>=0.1.0,<0.2.0', 'appnope;platform_system=="Darwin"', 'nest_asyncio', ], extras_require={ "test": [ "pytest !=5.3.4", "pytest-cov", "flaky", "ipyparallel", ], }, classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9' ], ) if any(a.startswith(('bdist', 'install')) for a in sys.argv): sys.path.insert(0, here) from ipykernel.kernelspec import write_kernel_spec, make_ipkernel_cmd, KERNEL_NAME # When building a wheel, the executable specified in the kernelspec is simply 'python'. if any(a.startswith('bdist') for a in sys.argv): argv = make_ipkernel_cmd(executable='python') # When installing from source, the full `sys.executable` can be used. if any(a.startswith('install') for a in sys.argv): argv = make_ipkernel_cmd() dest = os.path.join(here, 'data_kernelspec') if os.path.exists(dest): shutil.rmtree(dest) write_kernel_spec(dest, overrides={'argv': argv}) setup_args['data_files'] = [ ( pjoin('share', 'jupyter', 'kernels', KERNEL_NAME), glob(pjoin('data_kernelspec', '*')), ) ] if __name__ == '__main__': setup(**setup_args)
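# A quick post-install sanity check for the kernelspec written by the setup
# step above (an illustrative sketch: it assumes jupyter_client is importable
# and that the spec was installed under ipykernel's default KERNEL_NAME,
# normally "python3"):
#
#     from jupyter_client.kernelspec import KernelSpecManager
#     spec = KernelSpecManager().get_kernel_spec("python3")
#     print(spec.argv, spec.display_name)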