pax_global_header00006660000000000000000000000064136355001440014513gustar00rootroot0000000000000052 comment=5cc538a4b02136569799e1d3394a58302ace53e8 ipykernel-5.2.0/000077500000000000000000000000001363550014400135215ustar00rootroot00000000000000ipykernel-5.2.0/.gitignore000066400000000000000000000005411363550014400155110ustar00rootroot00000000000000MANIFEST build cover dist _build docs/man/*.gz docs/source/api/generated docs/source/config/options docs/source/interactive/magics-generated.txt docs/gh-pages IPython/html/notebook/static/mathjax IPython/html/static/style/*.map *.py[co] __pycache__ *.egg-info *~ *.bak .ipynb_checkpoints .tox .DS_Store \#*# .#* .coverage data_kernelspec .pytest_cache ipykernel-5.2.0/.mailmap000066400000000000000000000250671363550014400151540ustar00rootroot00000000000000A. J. Holyoake ajholyoake Aaron Culich Aaron Culich Aron Ahmadia ahmadia Benjamin Ragan-Kelley Benjamin Ragan-Kelley Min RK Benjamin Ragan-Kelley MinRK Barry Wark Barry Wark Ben Edwards Ben Edwards Bradley M. Froehle Bradley M. Froehle Bradley M. Froehle Bradley Froehle Brandon Parsons Brandon Parsons Brian E. Granger Brian Granger Brian E. Granger Brian Granger <> Brian E. Granger bgranger <> Brian E. Granger bgranger Christoph Gohlke cgohlke Cyrille Rossant rossant Damián Avila damianavila Damián Avila damianavila Damon Allen damontallen Darren Dale darren.dale <> Darren Dale Darren Dale <> Dav Clark Dav Clark <> Dav Clark Dav Clark David Hirschfeld dhirschfeld David P. Sanders David P. Sanders David Warde-Farley David Warde-Farley <> Doug Blank Doug Blank Eugene Van den Bulke Eugene Van den Bulke Evan Patterson Evan Patterson Evan Patterson Evan Patterson Evan Patterson epatters Evan Patterson epatters Ernie French Ernie French Ernie French ernie french Ernie French ernop Fernando Perez Fernando Perez Fernando Perez Fernando Perez fperez <> Fernando Perez fptest <> Fernando Perez fptest1 <> Fernando Perez Fernando Perez Fernando Perez Fernando Perez <> Fernando Perez Fernando Perez Frank Murphy Frank Murphy Gabriel Becker gmbecker Gael Varoquaux gael.varoquaux <> Gael Varoquaux gvaroquaux Gael Varoquaux Gael Varoquaux <> Ingolf Becker watercrossing Jake Vanderplas Jake Vanderplas Jakob Gager jakobgager Jakob Gager jakobgager Jakob Gager jakobgager Jason Grout Jason Grout Jason Gors jason gors Jason Gors jgors Jens Hedegaard Nielsen Jens Hedegaard Nielsen Jens Hedegaard Nielsen Jens H Nielsen Jens Hedegaard Nielsen Jens H. Nielsen Jez Ng Jez Ng Jonathan Frederic Jonathan Frederic Jonathan Frederic Jonathan Frederic Jonathan Frederic Jonathan Frederic Jonathan Frederic jon Jonathan Frederic U-Jon-PC\Jon Jonathan March Jonathan March Jonathan March jdmarch Jörgen Stenarson Jörgen Stenarson Jörgen Stenarson Jorgen Stenarson Jörgen Stenarson Jorgen Stenarson <> Jörgen Stenarson jstenar Jörgen Stenarson jstenar <> Jörgen Stenarson Jörgen Stenarson Juergen Hasch juhasch Juergen Hasch juhasch Julia Evans Julia Evans Kester Tong KesterTong Kyle Kelley Kyle Kelley Kyle Kelley rgbkrk Laurent Dufréchou Laurent Dufréchou Laurent Dufréchou laurent dufrechou <> Laurent Dufréchou laurent.dufrechou <> Laurent Dufréchou Laurent Dufrechou <> Laurent Dufréchou laurent.dufrechou@gmail.com <> Laurent Dufréchou ldufrechou Lorena Pantano Lorena Luis Pedro Coelho Luis Pedro Coelho Marc Molla marcmolla Martín Gaitán Martín Gaitán Matthias Bussonnier Matthias BUSSONNIER Matthias Bussonnier Bussonnier Matthias Matthias Bussonnier Matthias BUSSONNIER Matthias Bussonnier Matthias Bussonnier Michael Droettboom Michael Droettboom Nicholas Bollweg Nicholas Bollweg (Nick) Nicolas Rougier Nikolay Koldunov Nikolay Koldunov Omar Andrés Zapata Mesa Omar Andres Zapata Mesa Omar Andrés Zapata Mesa Omar Andres Zapata Mesa Pankaj Pandey Pankaj Pandey Pascal Schetelat pascal-schetelat Paul Ivanov Paul Ivanov Pauli Virtanen Pauli Virtanen <> Pauli Virtanen Pauli Virtanen Pierre Gerold Pierre Gerold Pietro Berkes Pietro Berkes Piti Ongmongkolkul piti118 Prabhu Ramachandran Prabhu Ramachandran <> Puneeth Chaganti Puneeth Chaganti Robert Kern rkern <> Robert Kern Robert Kern Robert Kern Robert Kern Robert Kern Robert Kern <> Robert Marchman Robert Marchman Satrajit Ghosh Satrajit Ghosh Satrajit Ghosh Satrajit Ghosh Scott Sanderson Scott Sanderson smithj1 smithj1 smithj1 smithj1 Steven Johnson stevenJohnson Steven Silvester blink1073 S. Weber s8weber Stefan van der Walt Stefan van der Walt Silvia Vinyes Silvia Silvia Vinyes silviav12 Sylvain Corlay Sylvain Corlay sylvain.corlay Ted Drain TD22057 Théophile Studer Théophile Studer Thomas Kluyver Thomas Thomas Spura Thomas Spura Timo Paulssen timo vds vds2212 vds vds Ville M. Vainio Ville M. Vainio ville Ville M. Vainio ville Ville M. Vainio vivainio <> Ville M. Vainio Ville M. Vainio Ville M. Vainio Ville M. Vainio Walter Doerwald walter.doerwald <> Walter Doerwald Walter Doerwald <> W. Trevor King W. Trevor King Yoval P. y-p ipykernel-5.2.0/.travis.yml000066400000000000000000000025071363550014400156360ustar00rootroot00000000000000language: python matrix: include: - arch: arm64 python: "nightly" dist: bionic - arch: amd64 python: "nightly" - arch: arm64 python: 3.5 - arch: amd64 python: 3.5 - arch: arm64 python: 3.6 - arch: amd64 python: 3.6 - arch: arm64 python: 3.7 - arch: amd64 python: 3.7 - arch: arm64 python: 3.8 - arch: amd64 python: 3.8 sudo: false dist: xenial install: - | # pip install pip install --upgrade setuptools pip pip install --pre --upgrade --upgrade-strategy=eager .[test] codecov - | # install matplotlib if [[ "$TRAVIS_PYTHON_VERSION" == "3.6" ]]; then pip install matplotlib curio trio fi - | # pin tornado if [[ ! -z "$TORNADO" ]]; then pip install tornado=="$TORNADO" fi - | # pin IPython if [[ ! -z "$IPYTHON" ]]; then if [[ "$IPYTHON" == "master" ]]; then SPEC=git+https://github.com/ipython/ipython#egg=ipython else SPEC="ipython==$IPYTHON" fi pip install --upgrade --pre "$SPEC" fi - pip freeze script: - jupyter kernelspec list - pytest --cov ipykernel --durations 10 -v ipykernel after_success: - codecov matrix: include: - python: 3.6 env: - IPYTHON=master allow_failures: - python: "nightly" ipykernel-5.2.0/CONTRIBUTING.md000066400000000000000000000035011363550014400157510ustar00rootroot00000000000000# Contributing Welcome! For contributing tips, follow the [Jupyter Contributing Guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html). Please make sure to follow the [Jupyter Code of Conduct](https://github.com/jupyter/governance/blob/master/conduct/code_of_conduct.md). ## Installing ipykernel for development ipykernel is a pure Python package, so setting up for development is the same as most other Python projects: ```bash # clone the repo git clone https://github.com/ipython/ipykernel cd ipykernel # do a 'development' or 'editable' install with pip: pip install -e . ``` ## Releasing ipykernel Releasing ipykernel is *almost* standard for a Python package: - set version for release - make and publish tag - publish release to PyPI - set version back to development The one extra step for ipykernel is that we need to make separate wheels for Python 2 and 3 because the bundled kernelspec has different contents for Python 2 and 3. This affects only the 4.x branch of ipykernel as the 5+ version is only compatible Python 3. The full release process is available below: ```bash # make sure version is set in ipykernel/_version.py VERSION="4.9.0" # commit the version and make a release tag git add ipykernel/_version.py git commit -m "release $VERSION" git tag -am "release $VERSION" $VERSION # push the changes to the repo git push git push --tags # publish the release to PyPI # note the extra `python2 setup.py bdist_wheel` for creating # the wheel for Python 2 pip install --upgrade twine git clean -xfd python3 setup.py sdist bdist_wheel python2 setup.py bdist_wheel # the extra step for the 4.x branch. twine upload dist/* # set the version back to '.dev' in ipykernel/_version.py # e.g. 4.10.0.dev if we just released 4.9.0 git add ipykernel/_version.py git commit -m "back to dev" git push ``` ipykernel-5.2.0/COPYING.md000066400000000000000000000054231363550014400151570ustar00rootroot00000000000000# Licensing terms This project is licensed under the terms of the Modified BSD License (also known as New or Revised or 3-Clause BSD), as follows: - Copyright (c) 2015, IPython Development Team All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the IPython Development Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ## About the IPython Development Team The IPython Development Team is the set of all contributors to the IPython project. This includes all of the IPython subprojects. The core team that coordinates development on GitHub can be found here: https://github.com/ipython/. ## Our Copyright Policy IPython uses a shared copyright model. Each contributor maintains copyright over their contributions to IPython. But, it is important to note that these contributions are typically only changes to the repositories. Thus, the IPython source code, in its entirety is not the copyright of any single person or institution. Instead, it is the collective copyright of the entire IPython Development Team. If individual contributors want to maintain a record of what changes/contributions they have specific copyright on, they should indicate their copyright in the commit message of the change, when they commit the change to one of the IPython repositories. With this in mind, the following banner should be used in any source code file to indicate the copyright and license terms: # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. ipykernel-5.2.0/MANIFEST.in000066400000000000000000000006451363550014400152640ustar00rootroot00000000000000include COPYING.md include CONTRIBUTING.md include README.md include pyproject.toml # Documentation graft docs exclude docs/\#* # Examples graft examples # docs subdirs we want to skip prune docs/_build prune docs/gh-pages prune docs/dist # Patterns to exclude from any directory global-exclude *~ global-exclude *.pyc global-exclude *.pyo global-exclude .git global-exclude .ipynb_checkpoints prune data_kernelspec ipykernel-5.2.0/README.md000066400000000000000000000013701363550014400150010ustar00rootroot00000000000000# IPython Kernel for Jupyter This package provides the IPython kernel for Jupyter. ## Installation from source 1. `git clone` 2. `cd ipykernel` 3. `pip install -e .` After that, all normal `ipython` commands will use this newly-installed version of the kernel. ## Running tests Ensure you have `nosetests` and the `nose-warnings-filters` plugin installed with ```bash pip install nose nose-warnings-filters ``` and then from the root directory ```bash nosetests ipykernel ``` ## Running tests with coverage Follow the instructions from `Running tests`. Ensure you have the `coverage` module installed with ```bash pip install coverage ``` and then from the root directory ```bash nosetests --with-coverage --cover-package ipykernel ipykernel ``` ipykernel-5.2.0/appveyor.yml000066400000000000000000000013741363550014400161160ustar00rootroot00000000000000build: false shallow_clone: false skip_branch_with_pr: true clone_depth: 1 environment: matrix: - python: "C:/Python38-x64" - python: "C:/Python36-x64" - python: "C:/Python35" cache: - C:\Users\appveyor\AppData\Local\pip\Cache init: - cmd: set PATH=%python%;%python%\scripts;%PATH% install: - cmd: | python -m pip install --upgrade setuptools pip wheel pip --version - cmd: | pip install --pre -e . pip install ipykernel[test] - cmd: | pip install matplotlib numpy pip freeze - cmd: python -c "import ipykernel.kernelspec; ipykernel.kernelspec.install(user=True)" test_script: - cmd: pytest -v -x --cov ipykernel ipykernel on_success: - cmd: pip install codecov - cmd: codecov ipykernel-5.2.0/docs/000077500000000000000000000000001363550014400144515ustar00rootroot00000000000000ipykernel-5.2.0/docs/Makefile000066400000000000000000000164151363550014400161200ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/IPythonKernel.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/IPythonKernel.qhc" applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/IPythonKernel" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/IPythonKernel" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." ipykernel-5.2.0/docs/changelog.rst000066400000000000000000000262251363550014400171410ustar00rootroot00000000000000Changes in IPython kernel ========================= 5.2 --- 5.2.0 ***** 5.2.0 Includes several bugfixes and internal logic improvements. - Produce better traceback when kernel is interrupted (:ghpull:`491`) - Add ``InProcessKernelClient.control_channel`` for compatibility with jupyter-client v6.0.0 (:ghpull:`489`) - Drop support for Python 3.4 (:ghpull:`483`) - Work around issue related to Tornado with python3.8 on Windows (:ghpull:`480`, :ghpull:`481`) - Prevent entering event loop if it is None (:ghpull:`464`) - Use ``shell.input_transformer_manager`` when available (:ghpull:`411`) 5.1 --- 5.1.4 ***** 5.1.4 Includes a few bugfixes, especially for compatibility with Python 3.8 on Windows. - Fix pickle issues when using inline matplotlib backend (:ghpull:`476`) - Fix an error during kernel shutdown (:ghpull:`463`) - Fix compatibility issues with Python 3.8 (:ghpull:`456`, :ghpull:`461`) - Remove some dead code (:ghpull:`474`, :ghpull:`467`) 5.1.3 ***** 5.1.3 Includes several bugfixes and internal logic improvements. - Fix comm shutdown behavior by adding a ``deleting`` option to ``close`` which can be set to prevent registering new comm channels during shutdown (:ghpull: `433`, :ghpull: `435`) - Fix ``Heartbeat._bind_socket`` to return on the first bind (:ghpull: `431`) - Moved ``InProcessKernelClient.flush`` to ``DummySocket`` (:gphull: `437`) - Don't redirect stdout if nose machinery is not present (:ghpull: `427`) - Rename `_asyncio.py` to `_asyncio_utils.py` to avoid name conflicts on Python 3.6+ (:ghpull: `426`) - Only generate kernelspec when installing or building wheel (:ghpull: `425`) - Fix priority ordering of control-channel messages in some cases (:ghpull:`443`) 5.1.2 ***** 5.1.2 fixes some socket-binding race conditions that caused testing failures in nbconvert. - Fix socket-binding race conditions (:ghpull: `412`, :ghpull: `419`) - Add a no-op ``flush`` method to ``DummySocket`` and comply with stream API (:ghpull: `405`) - Update kernel version to indicate kernel v5.3 support (:ghpull: `394`) - Add testing for upcoming Python 3.8 and PEP 570 positional parameters (:ghpull: `396`, :ghpull: `408`) 5.1.1 ***** 5.1.1 fixes a bug that caused cells to get stuck in a busy state. - Flush after sending replies (:ghpull:`390`) 5.1.0 ***** 5.1.0 fixes some important regressions in 5.0, especially on Windows. `5.1.0 on GitHub `__ - Fix message-ordering bug that could result in out-of-order executions, especially on Windows (:ghpull:`356`) - Fix classifiers to indicate dropped Python 2 support (:ghpull:`354`) - Remove some dead code (:ghpull:`355`) - Support rich-media responses in ``inspect_requests`` (tooltips) (:ghpull:`361`) 5.0 --- 5.0.0 ***** `5.0.0 on GitHub `__ - Drop support for Python 2. ``ipykernel`` 5.0 requires Python >= 3.4 - Add support for IPython's asynchronous code execution (:ghpull:`323`) - Update release process in ``CONTRIBUTING.md`` (:ghpull:`339`) 4.10 ---- `4.10 on GitHub `__ - Fix compatibility with IPython 7.0 (:ghpull:`348`) - Fix compatibility in cases where sys.stdout can be None (:ghpull:`344`) 4.9 --- 4.9.0 ***** `4.9.0 on GitHub `__ - Python 3.3 is no longer supported (:ghpull:`336`) - Flush stdout/stderr in KernelApp before replacing (:ghpull:`314`) - Allow preserving stdout and stderr in KernelApp (:ghpull:`315`) - Override writable method on OutStream (:ghpull:`316`) - Add metadata to help display matplotlib figures legibly (:ghpull:`336`) 4.8 --- 4.8.2 ***** `4.8.2 on GitHub `__ - Fix compatibility issue with qt eventloop and pyzmq 17 (:ghpull:`307`). 4.8.1 ***** `4.8.1 on GitHub `__ - set zmq.ROUTER_HANDOVER socket option when available to workaround libzmq reconnect bug (:ghpull:`300`). - Fix sdists including absolute paths for kernelspec files, which prevented installation from sdist on Windows (:ghpull:`306`). 4.8.0 ***** `4.8.0 on GitHub `__ - Cleanly shutdown integrated event loops when shutting down the kernel. (:ghpull:`290`) - ``%gui qt`` now uses Qt 5 by default rather than Qt 4, following a similar change in terminal IPython. (:ghpull:`293`) - Fix event loop integration for :mod:`asyncio` when run with Tornado 5, which uses asyncio where available. (:ghpull:`296`) 4.7 --- 4.7.0 ***** `4.7.0 on GitHub `__ - Add event loop integration for :mod:`asyncio`. - Use the new IPython completer API. - Add support for displaying GIF images (mimetype ``image/gif``). - Allow the kernel to be interrupted without killing the Qt console. - Fix ``is_complete`` response with cell magics. - Clean up encoding of bytes objects. - Clean up help links to use ``https`` and improve display titles. - Clean up ioloop handling in preparation for tornado 5. 4.6 --- 4.6.1 ***** `4.6.1 on GitHub `__ - Fix eventloop-integration bug preventing Qt windows/widgets from displaying with ipykernel 4.6.0 and IPython ≥ 5.2. - Avoid deprecation warnings about naive datetimes when working with jupyter_client ≥ 5.0. 4.6.0 ***** `4.6.0 on GitHub `__ - Add to API `DisplayPublisher.publish` two new fully backward-compatible keyword-args: - `update: bool` - `transient: dict` - Support new `transient` key in `display_data` messages spec for `publish`. For a display data message, `transient` contains data that shouldn't be persisted to files or documents. Add a `display_id` to this `transient` dict by `display(obj, display_id=...)` - Add `ipykernel_launcher` module which removes the current working directory from `sys.path` before launching the kernel. This helps to reduce the cases where the kernel won't start because there's a `random.py` (or similar) module in the current working directory. - Add busy/idle messages on IOPub during processing of aborted requests - Add active event loop setting to GUI, which enables the correct response to IPython's `is_event_loop_running_xxx` - Include IPython kernelspec in wheels to reduce reliance on "native kernel spec" in jupyter_client - Modify `OutStream` to inherit from `TextIOBase` instead of object to improve API support and error reporting - Fix IPython kernel death messages at start, such as "Kernel Restarting..." and "Kernel appears to have died", when parent-poller handles PID 1 - Various bugfixes 4.5 --- 4.5.2 ***** `4.5.2 on GitHub `__ - Fix bug when instantiating Comms outside of the IPython kernel (introduced in 4.5.1). 4.5.1 ***** `4.5.1 on GitHub `__ - Add missing ``stream`` parameter to overridden :func:`getpass` - Remove locks from iopub thread, which could cause deadlocks during debugging - Fix regression where KeyboardInterrupt was treated as an aborted request, rather than an error - Allow instantiating Comms outside of the IPython kernel 4.5.0 ***** `4.5 on GitHub `__ - Use figure.dpi instead of savefig.dpi to set DPI for inline figures - Support ipympl matplotlib backend (requires IPython update as well to fully work) - Various bugfixes, including fixes for output coming from threads, and :func:`input` when called with non-string prompts, which stdlib allows. 4.4 --- 4.4.1 ***** `4.4.1 on GitHub `__ - Fix circular import of matplotlib on Python 2 caused by the inline backend changes in 4.4.0. 4.4.0 ***** `4.4.0 on GitHub `__ - Use `MPLBACKEND`_ environment variable to tell matplotlib >= 1.5 use use the inline backend by default. This is only done if MPLBACKEND is not already set and no backend has been explicitly loaded, so setting ``MPLBACKEND=Qt4Agg`` or calling ``%matplotlib notebook`` or ``matplotlib.use('Agg')`` will take precedence. - Fixes for logging problems caused by 4.3, where logging could go to the terminal instead of the notebook. - Add ``--sys-prefix`` and ``--profile`` arguments to :command:`ipython kernel install` - Allow Comm (Widget) messages to be sent from background threads. - Select inline matplotlib backend by default if ``%matplotlib`` magic or ``matplotlib.use()`` are not called explicitly (for matplotlib >= 1.5). - Fix some longstanding minor deviations from the message protocol (missing status: ok in a few replies, connect_reply format). - Remove calls to NoOpContext from IPython, deprecated in 5.0. .. _MPLBACKEND: http://matplotlib.org/devel/coding_guide.html?highlight=mplbackend#developing-a-new-backend 4.3 --- 4.3.2 ***** - Use a nonempty dummy session key for inprocess kernels to avoid security warnings. 4.3.1 ***** - Fix Windows Python 3.5 incompatibility caused by faulthandler patch in 4.3 4.3.0 ***** `4.3.0 on GitHub `__ - Publish all IO in a thread, via :class:`IOPubThread`. This solves the problem of requiring :meth:`sys.stdout.flush` to be called in the notebook to produce output promptly during long-running cells. - Remove references to outdated IPython guiref in kernel banner. - Patch faulthandler to use ``sys.__stderr__`` instead of forwarded ``sys.stderr``, which has no fileno when forwarded. - Deprecate some vestiges of the Big Split: - :func:`ipykernel.find_connection_file` is deprecated. Use :func:`jupyter_client.find_connection_file` instead. - Various pieces of code specific to IPython parallel are deprecated in ipykernel and moved to ipyparallel. 4.2 --- 4.2.2 ***** `4.2.2 on GitHub `__ - Don't show interactive debugging info when kernel crashes - Fix handling of numerical types in json_clean - Testing fixes for output capturing 4.2.1 ***** `4.2.1 on GitHub `__ - Fix default display name back to "Python X" instead of "pythonX" 4.2.0 ***** `4.2 on GitHub `_ - Support sending a full message in initial opening of comms (metadata, buffers were not previously allowed) - When using ``ipython kernel install --name`` to install the IPython kernelspec, default display-name to the same value as ``--name``. 4.1 --- 4.1.1 ***** `4.1.1 on GitHub `_ - Fix missing ``ipykernel.__version__`` on Python 2. - Fix missing ``target_name`` when opening comms from the frontend. 4.1.0 ***** `4.1 on GitHub `_ - add ``ipython kernel install`` entrypoint for installing the IPython kernelspec - provisional implementation of ``comm_info`` request/reply for msgspec v5.1 4.0 --- `4.0 on GitHub `_ 4.0 is the first release of ipykernel as a standalone package. ipykernel-5.2.0/docs/conf.py000066400000000000000000000233621363550014400157560ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # IPython Kernel documentation build configuration file, created by # sphinx-quickstart on Mon Oct 5 11:32:44 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib_github_alt', ] github_project_url = "https://github.com/ipython/ipykernel" # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'IPython Kernel' copyright = '2015, IPython Development Team' author = 'IPython Development Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # version_ns = {} here = os.path.dirname(__file__) version_py = os.path.join(here, os.pardir, 'ipykernel', '_version.py') with open(version_py) as f: exec(compile(f.read(), version_py, 'exec'), version_ns) # The short X.Y version. version = '%i.%i' % version_ns['version_info'][:2] # The full version, including alpha/beta/rc tags. release = version_ns['__version__'] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. default_role = 'literal' # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'ipykerneldoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'ipykernel.tex', 'IPython Kernel Documentation', 'IPython Development Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'ipykernel', 'IPython Kernel Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'ipykernel', 'IPython Kernel Documentation', author, 'ipykernel', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), 'ipython': ('https://ipython.readthedocs.io/en/latest', None), 'jupyter': ('https://jupyter.readthedocs.io/en/latest', None), } ipykernel-5.2.0/docs/index.rst000066400000000000000000000006301363550014400163110ustar00rootroot00000000000000.. _index: IPython Kernel Docs =================== This contains minimal version-sensitive documentation for the IPython kernel package. Most IPython kernel documentation is in the `IPython documentation `_. Contents: .. toctree:: :maxdepth: 2 changelog.rst Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ipykernel-5.2.0/docs/make.bat000066400000000000000000000161321363550014400160610ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 2> nul if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\IPythonKernel.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\IPythonKernel.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end ipykernel-5.2.0/docs/requirements.txt000066400000000000000000000000311363550014400177270ustar00rootroot00000000000000sphinxcontrib_github_alt ipykernel-5.2.0/examples/000077500000000000000000000000001363550014400153375ustar00rootroot00000000000000ipykernel-5.2.0/examples/embedding/000077500000000000000000000000001363550014400172555ustar00rootroot00000000000000ipykernel-5.2.0/examples/embedding/inprocess_qtconsole.py000066400000000000000000000045571363550014400237360ustar00rootroot00000000000000from __future__ import print_function import os import sys from qtconsole.rich_ipython_widget import RichIPythonWidget from qtconsole.inprocess import QtInProcessKernelManager from IPython.lib import guisupport def print_process_id(): print('Process ID is:', os.getpid()) def init_asyncio_patch(): """set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if sys.platform.startswith("win") and sys.version_info >= (3, 8): import asyncio try: from asyncio import ( WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) def main(): # Print the ID of the main process print_process_id() init_asyncio_patch() app = guisupport.get_app_qt4() # Create an in-process kernel # >>> print_process_id() # will print the same process ID as the main process kernel_manager = QtInProcessKernelManager() kernel_manager.start_kernel() kernel = kernel_manager.kernel kernel.gui = 'qt4' kernel.shell.push({'foo': 43, 'print_process_id': print_process_id}) kernel_client = kernel_manager.client() kernel_client.start_channels() def stop(): kernel_client.stop_channels() kernel_manager.shutdown_kernel() app.exit() control = RichIPythonWidget() control.kernel_manager = kernel_manager control.kernel_client = kernel_client control.exit_requested.connect(stop) control.show() guisupport.start_event_loop_qt4(app) if __name__ == '__main__': main() ipykernel-5.2.0/examples/embedding/inprocess_terminal.py000066400000000000000000000040061363550014400235270ustar00rootroot00000000000000from __future__ import print_function import os import sys from ipykernel.inprocess import InProcessKernelManager from jupyter_console.ptshell import ZMQTerminalInteractiveShell def print_process_id(): print('Process ID is:', os.getpid()) def init_asyncio_patch(): """set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if sys.platform.startswith("win") and sys.version_info >= (3, 8): import asyncio try: from asyncio import ( WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) def main(): print_process_id() # Create an in-process kernel # >>> print_process_id() # will print the same process ID as the main process init_asyncio_patch() kernel_manager = InProcessKernelManager() kernel_manager.start_kernel() kernel = kernel_manager.kernel kernel.gui = 'qt4' kernel.shell.push({'foo': 43, 'print_process_id': print_process_id}) client = kernel_manager.client() client.start_channels() shell = ZMQTerminalInteractiveShell(manager=kernel_manager, client=client) shell.mainloop() if __name__ == '__main__': main() ipykernel-5.2.0/examples/embedding/internal_ipkernel.py000066400000000000000000000037351363550014400233440ustar00rootroot00000000000000#----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import sys from IPython.lib.kernel import connect_qtconsole from ipykernel.kernelapp import IPKernelApp #----------------------------------------------------------------------------- # Functions and classes #----------------------------------------------------------------------------- def mpl_kernel(gui): """Launch and return an IPython kernel with matplotlib support for the desired gui """ kernel = IPKernelApp.instance() kernel.initialize(['python', '--matplotlib=%s' % gui, #'--log-level=10' ]) return kernel class InternalIPKernel(object): def init_ipkernel(self, backend): # Start IPython kernel with GUI event loop and mpl support self.ipkernel = mpl_kernel(backend) # To create and track active qt consoles self.consoles = [] # This application will also act on the shell user namespace self.namespace = self.ipkernel.shell.user_ns # Example: a variable that will be seen by the user in the shell, and # that the GUI modifies (the 'Counter++' button increments it): self.namespace['app_counter'] = 0 #self.namespace['ipkernel'] = self.ipkernel # dbg def print_namespace(self, evt=None): print("\n***Variables in User namespace***") for k, v in self.namespace.items(): if not k.startswith('_'): print('%s -> %r' % (k, v)) sys.stdout.flush() def new_qt_console(self, evt=None): """start a new qtconsole connected to our kernel""" return connect_qtconsole(self.ipkernel.abs_connection_file, profile=self.ipkernel.profile) def count(self, evt=None): self.namespace['app_counter'] += 1 def cleanup_consoles(self, evt=None): for c in self.consoles: c.kill() ipykernel-5.2.0/examples/embedding/ipkernel_qtapp.py000077500000000000000000000054071363550014400226560ustar00rootroot00000000000000#!/usr/bin/env python """Example integrating an IPython kernel into a GUI App. This trivial GUI application internally starts an IPython kernel, to which Qt consoles can be connected either by the user at the command line or started from the GUI itself, via a button. The GUI can also manipulate one variable in the kernel's namespace, and print the namespace to the console. Play with it by running the script and then opening one or more consoles, and pushing the 'Counter++' and 'Namespace' buttons. Upon exit, it should automatically close all consoles opened from the GUI. Consoles attached separately from a terminal will not be terminated, though they will notice that their kernel died. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from PyQt4 import Qt from internal_ipkernel import InternalIPKernel #----------------------------------------------------------------------------- # Functions and classes #----------------------------------------------------------------------------- class SimpleWindow(Qt.QWidget, InternalIPKernel): def __init__(self, app): Qt.QWidget.__init__(self) self.app = app self.add_widgets() self.init_ipkernel('qt') def add_widgets(self): self.setGeometry(300, 300, 400, 70) self.setWindowTitle('IPython in your app') # Add simple buttons: console = Qt.QPushButton('Qt Console', self) console.setGeometry(10, 10, 100, 35) self.connect(console, Qt.SIGNAL('clicked()'), self.new_qt_console) namespace = Qt.QPushButton('Namespace', self) namespace.setGeometry(120, 10, 100, 35) self.connect(namespace, Qt.SIGNAL('clicked()'), self.print_namespace) count = Qt.QPushButton('Count++', self) count.setGeometry(230, 10, 80, 35) self.connect(count, Qt.SIGNAL('clicked()'), self.count) # Quit and cleanup quit = Qt.QPushButton('Quit', self) quit.setGeometry(320, 10, 60, 35) self.connect(quit, Qt.SIGNAL('clicked()'), Qt.qApp, Qt.SLOT('quit()')) self.app.connect(self.app, Qt.SIGNAL("lastWindowClosed()"), self.app, Qt.SLOT("quit()")) self.app.aboutToQuit.connect(self.cleanup_consoles) #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- if __name__ == "__main__": app = Qt.QApplication([]) # Create our window win = SimpleWindow(app) win.show() # Very important, IPython-specific step: this gets GUI event loop # integration going, and it replaces calling app.exec_() win.ipkernel.start() ipykernel-5.2.0/examples/embedding/ipkernel_wxapp.py000077500000000000000000000103251363550014400226630ustar00rootroot00000000000000#!/usr/bin/env python """Example integrating an IPython kernel into a GUI App. This trivial GUI application internally starts an IPython kernel, to which Qt consoles can be connected either by the user at the command line or started from the GUI itself, via a button. The GUI can also manipulate one variable in the kernel's namespace, and print the namespace to the console. Play with it by running the script and then opening one or more consoles, and pushing the 'Counter++' and 'Namespace' buttons. Upon exit, it should automatically close all consoles opened from the GUI. Consoles attached separately from a terminal will not be terminated, though they will notice that their kernel died. Ref: Modified from wxPython source code wxPython/samples/simple/simple.py """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import sys import wx from internal_ipkernel import InternalIPKernel #----------------------------------------------------------------------------- # Functions and classes #----------------------------------------------------------------------------- class MyFrame(wx.Frame, InternalIPKernel): """ This is MyFrame. It just shows a few controls on a wxPanel, and has a simple menu. """ def __init__(self, parent, title): wx.Frame.__init__(self, parent, -1, title, pos=(150, 150), size=(350, 285)) # Create the menubar menuBar = wx.MenuBar() # and a menu menu = wx.Menu() # add an item to the menu, using \tKeyName automatically # creates an accelerator, the third param is some help text # that will show up in the statusbar menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit this simple sample") # bind the menu event to an event handler self.Bind(wx.EVT_MENU, self.OnTimeToClose, id=wx.ID_EXIT) # and put the menu on the menubar menuBar.Append(menu, "&File") self.SetMenuBar(menuBar) self.CreateStatusBar() # Now create the Panel to put the other controls on. panel = wx.Panel(self) # and a few controls text = wx.StaticText(panel, -1, "Hello World!") text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD)) text.SetSize(text.GetBestSize()) qtconsole_btn = wx.Button(panel, -1, "Qt Console") ns_btn = wx.Button(panel, -1, "Namespace") count_btn = wx.Button(panel, -1, "Count++") close_btn = wx.Button(panel, -1, "Quit") # bind the button events to handlers self.Bind(wx.EVT_BUTTON, self.new_qt_console, qtconsole_btn) self.Bind(wx.EVT_BUTTON, self.print_namespace, ns_btn) self.Bind(wx.EVT_BUTTON, self.count, count_btn) self.Bind(wx.EVT_BUTTON, self.OnTimeToClose, close_btn) # Use a sizer to layout the controls, stacked vertically and with # a 10 pixel border around each sizer = wx.BoxSizer(wx.VERTICAL) for ctrl in [text, qtconsole_btn, ns_btn, count_btn, close_btn]: sizer.Add(ctrl, 0, wx.ALL, 10) panel.SetSizer(sizer) panel.Layout() # Start the IPython kernel with gui support self.init_ipkernel('wx') def OnTimeToClose(self, evt): """Event handler for the button click.""" print("See ya later!") sys.stdout.flush() self.cleanup_consoles(evt) self.Close() # Not sure why, but our IPython kernel seems to prevent normal WX # shutdown, so an explicit exit() call is needed. sys.exit() class MyApp(wx.App): def OnInit(self): frame = MyFrame(None, "Simple wxPython App") self.SetTopWindow(frame) frame.Show(True) self.ipkernel = frame.ipkernel return True #----------------------------------------------------------------------------- # Main script #----------------------------------------------------------------------------- if __name__ == '__main__': app = MyApp(redirect=False, clearSigInt=False) # Very important, IPython-specific step: this gets GUI event loop # integration going, and it replaces calling app.MainLoop() app.ipkernel.start() ipykernel-5.2.0/ipykernel/000077500000000000000000000000001363550014400155235ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/__init__.py000066400000000000000000000001751363550014400176370ustar00rootroot00000000000000from ._version import version_info, __version__, kernel_protocol_version_info, kernel_protocol_version from .connect import *ipykernel-5.2.0/ipykernel/__main__.py000066400000000000000000000001441363550014400176140ustar00rootroot00000000000000if __name__ == '__main__': from ipykernel import kernelapp as app app.launch_new_instance() ipykernel-5.2.0/ipykernel/_eventloop_macos.py000066400000000000000000000077201363550014400214370ustar00rootroot00000000000000"""Eventloop hook for OS X Calls NSApp / CoreFoundation APIs via ctypes. """ # cribbed heavily from IPython.terminal.pt_inputhooks.osx # obj-c boilerplate from appnope, used under BSD 2-clause import ctypes import ctypes.util from threading import Event objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc')) void_p = ctypes.c_void_p objc.objc_getClass.restype = void_p objc.sel_registerName.restype = void_p objc.objc_msgSend.restype = void_p objc.objc_msgSend.argtypes = [void_p, void_p] msg = objc.objc_msgSend def _utf8(s): """ensure utf8 bytes""" if not isinstance(s, bytes): s = s.encode('utf8') return s def n(name): """create a selector name (for ObjC methods)""" return objc.sel_registerName(_utf8(name)) def C(classname): """get an ObjC Class by name""" return objc.objc_getClass(_utf8(classname)) # end obj-c boilerplate from appnope # CoreFoundation C-API calls we will use: CoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation')) CFAbsoluteTimeGetCurrent = CoreFoundation.CFAbsoluteTimeGetCurrent CFAbsoluteTimeGetCurrent.restype = ctypes.c_double CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent CFRunLoopGetCurrent.restype = void_p CFRunLoopGetMain = CoreFoundation.CFRunLoopGetMain CFRunLoopGetMain.restype = void_p CFRunLoopStop = CoreFoundation.CFRunLoopStop CFRunLoopStop.restype = None CFRunLoopStop.argtypes = [void_p] CFRunLoopTimerCreate = CoreFoundation.CFRunLoopTimerCreate CFRunLoopTimerCreate.restype = void_p CFRunLoopTimerCreate.argtypes = [ void_p, # allocator (NULL) ctypes.c_double, # fireDate ctypes.c_double, # interval ctypes.c_int, # flags (0) ctypes.c_int, # order (0) void_p, # callout void_p, # context ] CFRunLoopAddTimer = CoreFoundation.CFRunLoopAddTimer CFRunLoopAddTimer.restype = None CFRunLoopAddTimer.argtypes = [ void_p, void_p, void_p ] kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes') def _NSApp(): """Return the global NSApplication instance (NSApp)""" return msg(C('NSApplication'), n('sharedApplication')) def _wake(NSApp): """Wake the Application""" event = msg(C('NSEvent'), n('otherEventWithType:location:modifierFlags:' 'timestamp:windowNumber:context:subtype:data1:data2:'), 15, # Type 0, # location 0, # flags 0, # timestamp 0, # window None, # context 0, # subtype 0, # data1 0, # data2 ) msg(NSApp, n('postEvent:atStart:'), void_p(event), True) _triggered = Event() def stop(timer=None, loop=None): """Callback to fire when there's input to be read""" _triggered.set() NSApp = _NSApp() # if NSApp is not running, stop CFRunLoop directly, # otherwise stop and wake NSApp if msg(NSApp, n('isRunning')): msg(NSApp, n('stop:'), NSApp) _wake(NSApp) else: CFRunLoopStop(CFRunLoopGetCurrent()) _c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p) _c_stop_callback = _c_callback_func_type(stop) def _stop_after(delay): """Register callback to stop eventloop after a delay""" timer = CFRunLoopTimerCreate( None, # allocator CFAbsoluteTimeGetCurrent() + delay, # fireDate 0, # interval 0, # flags 0, # order _c_stop_callback, None, ) CFRunLoopAddTimer( CFRunLoopGetMain(), timer, kCFRunLoopCommonModes, ) def mainloop(duration=1): """run the Cocoa eventloop for the specified duration (seconds)""" _triggered.clear() NSApp = _NSApp() _stop_after(duration) msg(NSApp, n('run')) if not _triggered.is_set(): # app closed without firing callback, # probably due to last window being closed. # Run the loop manually in this case, # since there may be events still to process (ipython/ipython#9734) CoreFoundation.CFRunLoopRun() ipykernel-5.2.0/ipykernel/_version.py000066400000000000000000000010751363550014400177240ustar00rootroot00000000000000version_info = (5, 2, 0) __version__ = '.'.join(map(str, version_info[:3])) # pep440 is annoying, beta/alpha/rc should _not_ have dots or pip/setuptools # confuses which one between the wheel and sdist is the most recent. if len(version_info) == 4: extra = version_info[3] if extra.startswith(('a','b','rc')): __version__ = __version__+extra else: __version__ = __version__+'.'+extra if len(version_info) > 4: raise NotImplementedError kernel_protocol_version_info = (5, 3) kernel_protocol_version = '%s.%s' % kernel_protocol_version_info ipykernel-5.2.0/ipykernel/codeutil.py000066400000000000000000000025571363550014400177160ustar00rootroot00000000000000# encoding: utf-8 """Utilities to enable code objects to be pickled. Any process that import this module will be able to pickle code objects. This includes the func_code attribute of any function. Once unpickled, new functions can be built using new.function(code, globals()). Eventually we need to automate all of this so that functions themselves can be pickled. Reference: A. Tremols, P Cogolo, "Python Cookbook," p 302-305 """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import warnings warnings.warn("ipykernel.codeutil is deprecated since IPykernel 4.3.1. It has moved to ipyparallel.serialize", DeprecationWarning) import sys import types try: import copyreg # Py 3 except ImportError: import copy_reg as copyreg # Py 2 def code_ctor(*args): return types.CodeType(*args) def reduce_code(co): args = [co.co_argcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_freevars, co.co_cellvars] if sys.version_info[0] >= 3: args.insert(1, co.co_kwonlyargcount) if sys.version_info > (3, 8, 0, 'alpha', 3): args.insert(1, co.co_posonlyargcount) return code_ctor, tuple(args) copyreg.pickle(types.CodeType, reduce_code) ipykernel-5.2.0/ipykernel/comm/000077500000000000000000000000001363550014400164565ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/comm/__init__.py000066400000000000000000000000531363550014400205650ustar00rootroot00000000000000from .manager import * from .comm import * ipykernel-5.2.0/ipykernel/comm/comm.py000066400000000000000000000125501363550014400177660ustar00rootroot00000000000000"""Base class for a Comm""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import uuid from traitlets.config import LoggingConfigurable from ipykernel.kernelbase import Kernel from ipykernel.jsonutil import json_clean from traitlets import Instance, Unicode, Bytes, Bool, Dict, Any, default class Comm(LoggingConfigurable): """Class for communicating between a Frontend and a Kernel""" kernel = Instance('ipykernel.kernelbase.Kernel', allow_none=True) @default('kernel') def _default_kernel(self): if Kernel.initialized(): return Kernel.instance() comm_id = Unicode() @default('comm_id') def _default_comm_id(self): return uuid.uuid4().hex primary = Bool(True, help="Am I the primary or secondary Comm?") target_name = Unicode('comm') target_module = Unicode(None, allow_none=True, help="""requirejs module from which to load comm target.""") topic = Bytes() @default('topic') def _default_topic(self): return ('comm-%s' % self.comm_id).encode('ascii') _open_data = Dict(help="data dict, if any, to be included in comm_open") _close_data = Dict(help="data dict, if any, to be included in comm_close") _msg_callback = Any() _close_callback = Any() _closed = Bool(True) def __init__(self, target_name='', data=None, metadata=None, buffers=None, **kwargs): if target_name: kwargs['target_name'] = target_name super(Comm, self).__init__(**kwargs) if self.kernel: if self.primary: # I am primary, open my peer. self.open(data=data, metadata=metadata, buffers=buffers) else: self._closed = False def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys): """Helper for sending a comm message on IOPub""" data = {} if data is None else data metadata = {} if metadata is None else metadata content = json_clean(dict(data=data, comm_id=self.comm_id, **keys)) self.kernel.session.send(self.kernel.iopub_socket, msg_type, content, metadata=json_clean(metadata), parent=self.kernel._parent_header, ident=self.topic, buffers=buffers, ) def __del__(self): """trigger close on gc""" self.close(deleting=True) # publishing messages def open(self, data=None, metadata=None, buffers=None): """Open the frontend-side version of this comm""" if data is None: data = self._open_data comm_manager = getattr(self.kernel, 'comm_manager', None) if comm_manager is None: raise RuntimeError("Comms cannot be opened without a kernel " "and a comm_manager attached to that kernel.") comm_manager.register_comm(self) try: self._publish_msg('comm_open', data=data, metadata=metadata, buffers=buffers, target_name=self.target_name, target_module=self.target_module, ) self._closed = False except: comm_manager.unregister_comm(self) raise def close(self, data=None, metadata=None, buffers=None, deleting=False): """Close the frontend-side version of this comm""" if self._closed: # only close once return self._closed = True # nothing to send if we have no kernel # can be None during interpreter cleanup if not self.kernel: return if data is None: data = self._close_data self._publish_msg('comm_close', data=data, metadata=metadata, buffers=buffers, ) if not deleting: # If deleting, the comm can't be registered self.kernel.comm_manager.unregister_comm(self) def send(self, data=None, metadata=None, buffers=None): """Send a message to the frontend-side version of this comm""" self._publish_msg('comm_msg', data=data, metadata=metadata, buffers=buffers, ) # registering callbacks def on_close(self, callback): """Register a callback for comm_close Will be called with the `data` of the close message. Call `on_close(None)` to disable an existing callback. """ self._close_callback = callback def on_msg(self, callback): """Register a callback for comm_msg Will be called with the `data` of any comm_msg messages. Call `on_msg(None)` to disable an existing callback. """ self._msg_callback = callback # handling of incoming messages def handle_close(self, msg): """Handle a comm_close message""" self.log.debug("handle_close[%s](%s)", self.comm_id, msg) if self._close_callback: self._close_callback(msg) def handle_msg(self, msg): """Handle a comm_msg message""" self.log.debug("handle_msg[%s](%s)", self.comm_id, msg) if self._msg_callback: shell = self.kernel.shell if shell: shell.events.trigger('pre_execute') self._msg_callback(msg) if shell: shell.events.trigger('post_execute') __all__ = ['Comm'] ipykernel-5.2.0/ipykernel/comm/manager.py000066400000000000000000000100021363550014400204330ustar00rootroot00000000000000"""Base class to manage comms""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import sys import logging from traitlets.config import LoggingConfigurable from ipython_genutils.importstring import import_item from ipython_genutils.py3compat import string_types from traitlets import Instance, Unicode, Dict, Any, default from .comm import Comm class CommManager(LoggingConfigurable): """Manager for Comms in the Kernel""" kernel = Instance('ipykernel.kernelbase.Kernel') comms = Dict() targets = Dict() # Public APIs def register_target(self, target_name, f): """Register a callable f for a given target name f will be called with two arguments when a comm_open message is received with `target`: - the Comm instance - the `comm_open` message itself. f can be a Python callable or an import string for one. """ if isinstance(f, string_types): f = import_item(f) self.targets[target_name] = f def unregister_target(self, target_name, f): """Unregister a callable registered with register_target""" return self.targets.pop(target_name) def register_comm(self, comm): """Register a new comm""" comm_id = comm.comm_id comm.kernel = self.kernel self.comms[comm_id] = comm return comm_id def unregister_comm(self, comm): """Unregister a comm, and close its counterpart""" # unlike get_comm, this should raise a KeyError comm = self.comms.pop(comm.comm_id) def get_comm(self, comm_id): """Get a comm with a particular id Returns the comm if found, otherwise None. This will not raise an error, it will log messages if the comm cannot be found. """ try: return self.comms[comm_id] except KeyError: self.log.warning("No such comm: %s", comm_id) if self.log.isEnabledFor(logging.DEBUG): # don't create the list of keys if debug messages aren't enabled self.log.debug("Current comms: %s", list(self.comms.keys())) # Message handlers def comm_open(self, stream, ident, msg): """Handler for comm_open messages""" content = msg['content'] comm_id = content['comm_id'] target_name = content['target_name'] f = self.targets.get(target_name, None) comm = Comm(comm_id=comm_id, primary=False, target_name=target_name, ) self.register_comm(comm) if f is None: self.log.error("No such comm target registered: %s", target_name) else: try: f(comm, msg) return except Exception: self.log.error("Exception opening comm with target: %s", target_name, exc_info=True) # Failure. try: comm.close() except: self.log.error("""Could not close comm during `comm_open` failure clean-up. The comm may not have been opened yet.""", exc_info=True) def comm_msg(self, stream, ident, msg): """Handler for comm_msg messages""" content = msg['content'] comm_id = content['comm_id'] comm = self.get_comm(comm_id) if comm is None: return try: comm.handle_msg(msg) except Exception: self.log.error('Exception in comm_msg for %s', comm_id, exc_info=True) def comm_close(self, stream, ident, msg): """Handler for comm_close messages""" content = msg['content'] comm_id = content['comm_id'] comm = self.get_comm(comm_id) if comm is None: return self.comms[comm_id]._closed = True del self.comms[comm_id] try: comm.handle_close(msg) except Exception: self.log.error('Exception in comm_close for %s', comm_id, exc_info=True) __all__ = ['CommManager'] ipykernel-5.2.0/ipykernel/connect.py000066400000000000000000000141451363550014400175330ustar00rootroot00000000000000"""Connection file-related utilities for the kernel """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import absolute_import import json import sys from subprocess import Popen, PIPE import warnings from IPython.core.profiledir import ProfileDir from IPython.paths import get_ipython_dir from ipython_genutils.path import filefind from ipython_genutils.py3compat import str_to_bytes, PY3 import jupyter_client from jupyter_client import write_connection_file def get_connection_file(app=None): """Return the path to the connection file of an app Parameters ---------- app : IPKernelApp instance [optional] If unspecified, the currently running app will be used """ if app is None: from ipykernel.kernelapp import IPKernelApp if not IPKernelApp.initialized(): raise RuntimeError("app not specified, and not in a running Kernel") app = IPKernelApp.instance() return filefind(app.connection_file, ['.', app.connection_dir]) def find_connection_file(filename='kernel-*.json', profile=None): """DEPRECATED: find a connection file, and return its absolute path. THIS FUNCTION IS DEPRECATED. Use jupyter_client.find_connection_file instead. Parameters ---------- filename : str The connection file or fileglob to search for. profile : str [optional] The name of the profile to use when searching for the connection file, if different from the current IPython session or 'default'. Returns ------- str : The absolute path of the connection file. """ import warnings warnings.warn("""ipykernel.find_connection_file is deprecated, use jupyter_client.find_connection_file""", DeprecationWarning, stacklevel=2) from IPython.core.application import BaseIPythonApplication as IPApp try: # quick check for absolute path, before going through logic return filefind(filename) except IOError: pass if profile is None: # profile unspecified, check if running from an IPython app if IPApp.initialized(): app = IPApp.instance() profile_dir = app.profile_dir else: # not running in IPython, use default profile profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), 'default') else: # find profiledir by profile name: profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile) security_dir = profile_dir.security_dir return jupyter_client.find_connection_file(filename, path=['.', security_dir]) def _find_connection_file(connection_file, profile=None): """Return the absolute path for a connection file - If nothing specified, return current Kernel's connection file - If profile specified, show deprecation warning about finding connection files in profiles - Otherwise, call jupyter_client.find_connection_file """ if connection_file is None: # get connection file from current kernel return get_connection_file() else: # connection file specified, allow shortnames: if profile is not None: warnings.warn( "Finding connection file by profile is deprecated.", DeprecationWarning, stacklevel=3, ) return find_connection_file(connection_file, profile=profile) else: return jupyter_client.find_connection_file(connection_file) def get_connection_info(connection_file=None, unpack=False, profile=None): """Return the connection information for the current Kernel. Parameters ---------- connection_file : str [optional] The connection file to be used. Can be given by absolute path, or IPython will search in the security directory of a given profile. If run from IPython, If unspecified, the connection file for the currently running IPython Kernel will be used, which is only allowed from inside a kernel. unpack : bool [default: False] if True, return the unpacked dict, otherwise just the string contents of the file. profile : DEPRECATED Returns ------- The connection dictionary of the current kernel, as string or dict, depending on `unpack`. """ cf = _find_connection_file(connection_file, profile) with open(cf) as f: info = f.read() if unpack: info = json.loads(info) # ensure key is bytes: info['key'] = str_to_bytes(info.get('key', '')) return info def connect_qtconsole(connection_file=None, argv=None, profile=None): """Connect a qtconsole to the current kernel. This is useful for connecting a second qtconsole to a kernel, or to a local notebook. Parameters ---------- connection_file : str [optional] The connection file to be used. Can be given by absolute path, or IPython will search in the security directory of a given profile. If run from IPython, If unspecified, the connection file for the currently running IPython Kernel will be used, which is only allowed from inside a kernel. argv : list [optional] Any extra args to be passed to the console. profile : DEPRECATED Returns ------- :class:`subprocess.Popen` instance running the qtconsole frontend """ argv = [] if argv is None else argv cf = _find_connection_file(connection_file, profile) cmd = ';'.join([ "from IPython.qt.console import qtconsoleapp", "qtconsoleapp.main()" ]) kwargs = {} if PY3: # Launch the Qt console in a separate session & process group, so # interrupting the kernel doesn't kill it. This kwarg is not on Py2. kwargs['start_new_session'] = True return Popen([sys.executable, '-c', cmd, '--existing', cf] + argv, stdout=PIPE, stderr=PIPE, close_fds=(sys.platform != 'win32'), **kwargs ) __all__ = [ 'write_connection_file', 'get_connection_file', 'find_connection_file', 'get_connection_info', 'connect_qtconsole', ] ipykernel-5.2.0/ipykernel/datapub.py000066400000000000000000000036231363550014400175210ustar00rootroot00000000000000"""Publishing native (typically pickled) objects. """ import warnings warnings.warn("ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub", DeprecationWarning) # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from traitlets.config import Configurable from traitlets import Instance, Dict, CBytes, Any from ipykernel.jsonutil import json_clean from ipykernel.serialize import serialize_object from jupyter_client.session import Session, extract_header class ZMQDataPublisher(Configurable): topic = topic = CBytes(b'datapub') session = Instance(Session, allow_none=True) pub_socket = Any(allow_none=True) parent_header = Dict({}) def set_parent(self, parent): """Set the parent for outbound messages.""" self.parent_header = extract_header(parent) def publish_data(self, data): """publish a data_message on the IOPub channel Parameters ---------- data : dict The data to be published. Think of it as a namespace. """ session = self.session buffers = serialize_object(data, buffer_threshold=session.buffer_threshold, item_threshold=session.item_threshold, ) content = json_clean(dict(keys=list(data.keys()))) session.send(self.pub_socket, 'data_message', content=content, parent=self.parent_header, buffers=buffers, ident=self.topic, ) def publish_data(data): """publish a data_message on the IOPub channel Parameters ---------- data : dict The data to be published. Think of it as a namespace. """ warnings.warn("ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub", DeprecationWarning) from ipykernel.zmqshell import ZMQInteractiveShell ZMQInteractiveShell.instance().data_pub.publish_data(data) ipykernel-5.2.0/ipykernel/displayhook.py000066400000000000000000000051731363550014400204310ustar00rootroot00000000000000"""Replacements for sys.displayhook that publish over ZMQ.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import sys from IPython.core.displayhook import DisplayHook from ipykernel.jsonutil import encode_images, json_clean from ipython_genutils.py3compat import builtin_mod from traitlets import Instance, Dict, Any from jupyter_client.session import extract_header, Session class ZMQDisplayHook(object): """A simple displayhook that publishes the object's repr over a ZeroMQ socket.""" topic = b'execute_result' def __init__(self, session, pub_socket): self.session = session self.pub_socket = pub_socket self.parent_header = {} def get_execution_count(self): """This method is replaced in kernelapp""" return 0 def __call__(self, obj): if obj is None: return builtin_mod._ = obj sys.stdout.flush() sys.stderr.flush() contents = {u'execution_count': self.get_execution_count(), u'data': {'text/plain': repr(obj)}, u'metadata': {}} self.session.send(self.pub_socket, u'execute_result', contents, parent=self.parent_header, ident=self.topic) def set_parent(self, parent): self.parent_header = extract_header(parent) class ZMQShellDisplayHook(DisplayHook): """A displayhook subclass that publishes data using ZeroMQ. This is intended to work with an InteractiveShell instance. It sends a dict of different representations of the object.""" topic=None session = Instance(Session, allow_none=True) pub_socket = Any(allow_none=True) parent_header = Dict({}) def set_parent(self, parent): """Set the parent for outbound messages.""" self.parent_header = extract_header(parent) def start_displayhook(self): self.msg = self.session.msg(u'execute_result', { 'data': {}, 'metadata': {}, }, parent=self.parent_header) def write_output_prompt(self): """Write the output prompt.""" self.msg['content']['execution_count'] = self.prompt_count def write_format_data(self, format_dict, md_dict=None): self.msg['content']['data'] = json_clean(encode_images(format_dict)) self.msg['content']['metadata'] = md_dict def finish_displayhook(self): """Finish up all displayhook activities.""" sys.stdout.flush() sys.stderr.flush() if self.msg['content']['data']: self.session.send(self.pub_socket, self.msg, ident=self.topic) self.msg = None ipykernel-5.2.0/ipykernel/embed.py000066400000000000000000000040101363550014400171440ustar00rootroot00000000000000"""Simple function for embedding an IPython kernel """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import sys from IPython.utils.frame import extract_module_locals from .kernelapp import IPKernelApp #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- def embed_kernel(module=None, local_ns=None, **kwargs): """Embed and start an IPython kernel in a given scope. Parameters ---------- module : ModuleType, optional The module to load into IPython globals (default: caller) local_ns : dict, optional The namespace to load into IPython user namespace (default: caller) kwargs : various, optional Further keyword args are relayed to the IPKernelApp constructor, allowing configuration of the Kernel. Will only have an effect on the first embed_kernel call for a given process. """ # get the app if it exists, or set it up if it doesn't if IPKernelApp.initialized(): app = IPKernelApp.instance() else: app = IPKernelApp.instance(**kwargs) app.initialize([]) # Undo unnecessary sys module mangling from init_sys_modules. # This would not be necessary if we could prevent it # in the first place by using a different InteractiveShell # subclass, as in the regular embed case. main = app.kernel.shell._orig_sys_modules_main_mod if main is not None: sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main # load the calling scope if not given (caller_module, caller_locals) = extract_module_locals(1) if module is None: module = caller_module if local_ns is None: local_ns = caller_locals app.kernel.user_module = module app.kernel.user_ns = local_ns app.shell.set_completer_frame() app.start() ipykernel-5.2.0/ipykernel/eventloops.py000066400000000000000000000270501363550014400202770ustar00rootroot00000000000000# encoding: utf-8 """Event loop integration for the ZeroMQ-based kernels.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from functools import partial import os import sys import platform import zmq from distutils.version import LooseVersion as V from traitlets.config.application import Application def _use_appnope(): """Should we use appnope for dealing with OS X app nap? Checks if we are on OS X 10.9 or greater. """ return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9') def _notify_stream_qt(kernel, stream): from IPython.external.qt_for_kernel import QtCore def process_stream_events(): """fall back to main loop when there's a socket event""" # call flush to ensure that the stream doesn't lose events # due to our consuming of the edge-triggered FD # flush returns the number of events consumed. # if there were any, wake it up if stream.flush(limit=1): notifier.setEnabled(False) kernel.app.quit() fd = stream.getsockopt(zmq.FD) notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app) notifier.activated.connect(process_stream_events) # there may already be unprocessed events waiting. # these events will not wake zmq's edge-triggered FD # since edge-triggered notification only occurs on new i/o activity. # process all the waiting events immediately # so we start in a clean state ensuring that any new i/o events will notify. # schedule first call on the eventloop as soon as it's running, # so we don't block here processing events timer = QtCore.QTimer(kernel.app) timer.setSingleShot(True) timer.timeout.connect(process_stream_events) timer.start(0) # mapping of keys to loop functions loop_map = { 'inline': None, 'nbagg': None, 'notebook': None, 'ipympl': None, 'widget': None, None: None, } def register_integration(*toolkitnames): """Decorator to register an event loop to integrate with the IPython kernel The decorator takes names to register the event loop as for the %gui magic. You can provide alternative names for the same toolkit. The decorated function should take a single argument, the IPython kernel instance, arrange for the event loop to call ``kernel.do_one_iteration()`` at least every ``kernel._poll_interval`` seconds, and start the event loop. :mod:`ipykernel.eventloops` provides and registers such functions for a few common event loops. """ def decorator(func): for name in toolkitnames: loop_map[name] = func func.exit_hook = lambda kernel: None def exit_decorator(exit_func): """@func.exit is now a decorator to register a function to be called on exit """ func.exit_hook = exit_func return exit_func func.exit = exit_decorator return func return decorator def _loop_qt(app): """Inner-loop for running the Qt eventloop Pulled from guisupport.start_event_loop in IPython < 5.2, since IPython 5.2 only checks `get_ipython().active_eventloop` is defined, rather than if the eventloop is actually running. """ app._in_event_loop = True app.exec_() app._in_event_loop = False @register_integration('qt4') def loop_qt4(kernel): """Start a kernel with PyQt4 event loop integration.""" from IPython.lib.guisupport import get_app_qt4 kernel.app = get_app_qt4([" "]) kernel.app.setQuitOnLastWindowClosed(False) for s in kernel.shell_streams: _notify_stream_qt(kernel, s) _loop_qt(kernel.app) @register_integration('qt', 'qt5') def loop_qt5(kernel): """Start a kernel with PyQt5 event loop integration.""" os.environ['QT_API'] = 'pyqt5' return loop_qt4(kernel) # exit and watch are the same for qt 4 and 5 @loop_qt4.exit @loop_qt5.exit def loop_qt_exit(kernel): kernel.app.exit() def _loop_wx(app): """Inner-loop for running the Wx eventloop Pulled from guisupport.start_event_loop in IPython < 5.2, since IPython 5.2 only checks `get_ipython().active_eventloop` is defined, rather than if the eventloop is actually running. """ app._in_event_loop = True app.MainLoop() app._in_event_loop = False @register_integration('wx') def loop_wx(kernel): """Start a kernel with wx event loop support.""" import wx # Wx uses milliseconds poll_interval = int(1000 * kernel._poll_interval) def wake(): """wake from wx""" for stream in kernel.shell_streams: if stream.flush(limit=1): kernel.app.ExitMainLoop() return # We have to put the wx.Timer in a wx.Frame for it to fire properly. # We make the Frame hidden when we create it in the main app below. class TimerFrame(wx.Frame): def __init__(self, func): wx.Frame.__init__(self, None, -1) self.timer = wx.Timer(self) # Units for the timer are in milliseconds self.timer.Start(poll_interval) self.Bind(wx.EVT_TIMER, self.on_timer) self.func = func def on_timer(self, event): self.func() # We need a custom wx.App to create our Frame subclass that has the # wx.Timer to defer back to the tornado event loop. class IPWxApp(wx.App): def OnInit(self): self.frame = TimerFrame(wake) self.frame.Show(False) return True # The redirect=False here makes sure that wx doesn't replace # sys.stdout/stderr with its own classes. if not ( getattr(kernel, 'app', None) and isinstance(kernel.app, wx.App) ): kernel.app = IPWxApp(redirect=False) # The import of wx on Linux sets the handler for signal.SIGINT # to 0. This is a bug in wx or gtk. We fix by just setting it # back to the Python default. import signal if not callable(signal.getsignal(signal.SIGINT)): signal.signal(signal.SIGINT, signal.default_int_handler) _loop_wx(kernel.app) @loop_wx.exit def loop_wx_exit(kernel): import wx wx.Exit() @register_integration('tk') def loop_tk(kernel): """Start a kernel with the Tk event loop.""" from tkinter import Tk, READABLE def process_stream_events(stream, *a, **kw): """fall back to main loop when there's a socket event""" if stream.flush(limit=1): app.tk.deletefilehandler(stream.getsockopt(zmq.FD)) app.quit() # For Tkinter, we create a Tk object and call its withdraw method. kernel.app = app = Tk() kernel.app.withdraw() for stream in kernel.shell_streams: notifier = partial(process_stream_events, stream) # seems to be needed for tk notifier.__name__ = 'notifier' app.tk.createfilehandler(stream.getsockopt(zmq.FD), READABLE, notifier) # schedule initial call after start app.after(0, notifier) app.mainloop() @loop_tk.exit def loop_tk_exit(kernel): kernel.app.destroy() @register_integration('gtk') def loop_gtk(kernel): """Start the kernel, coordinating with the GTK event loop""" from .gui.gtkembed import GTKEmbed gtk_kernel = GTKEmbed(kernel) gtk_kernel.start() kernel._gtk = gtk_kernel @loop_gtk.exit def loop_gtk_exit(kernel): kernel._gtk.stop() @register_integration('gtk3') def loop_gtk3(kernel): """Start the kernel, coordinating with the GTK event loop""" from .gui.gtk3embed import GTKEmbed gtk_kernel = GTKEmbed(kernel) gtk_kernel.start() kernel._gtk = gtk_kernel @loop_gtk3.exit def loop_gtk3_exit(kernel): kernel._gtk.stop() @register_integration('osx') def loop_cocoa(kernel): """Start the kernel, coordinating with the Cocoa CFRunLoop event loop via the matplotlib MacOSX backend. """ from ._eventloop_macos import mainloop, stop real_excepthook = sys.excepthook def handle_int(etype, value, tb): """don't let KeyboardInterrupts look like crashes""" # wake the eventloop when we get a signal stop() if etype is KeyboardInterrupt: print("KeyboardInterrupt caught in CFRunLoop", file=sys.__stdout__) else: real_excepthook(etype, value, tb) while not kernel.shell.exit_now: try: # double nested try/except, to properly catch KeyboardInterrupt # due to pyzmq Issue #130 try: # don't let interrupts during mainloop invoke crash_handler: sys.excepthook = handle_int mainloop(kernel._poll_interval) for stream in kernel.shell_streams: if stream.flush(limit=1): # events to process, return control to kernel return except: raise except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel print("KeyboardInterrupt caught in kernel", file=sys.__stdout__) finally: # ensure excepthook is restored sys.excepthook = real_excepthook @loop_cocoa.exit def loop_cocoa_exit(kernel): from ._eventloop_macos import stop stop() @register_integration('asyncio') def loop_asyncio(kernel): '''Start a kernel with asyncio event loop support.''' import asyncio loop = asyncio.get_event_loop() # loop is already running (e.g. tornado 5), nothing left to do if loop.is_running(): return if loop.is_closed(): # main loop is closed, create a new one loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop._should_close = False # pause eventloop when there's an event on a zmq socket def process_stream_events(stream): """fall back to main loop when there's a socket event""" if stream.flush(limit=1): loop.stop() for stream in kernel.shell_streams: fd = stream.getsockopt(zmq.FD) notifier = partial(process_stream_events, stream) loop.add_reader(fd, notifier) loop.call_soon(notifier) while True: error = None try: loop.run_forever() except KeyboardInterrupt: continue except Exception as e: error = e if loop._should_close: loop.close() if error is not None: raise error break @loop_asyncio.exit def loop_asyncio_exit(kernel): """Exit hook for asyncio""" import asyncio loop = asyncio.get_event_loop() @asyncio.coroutine def close_loop(): if hasattr(loop, 'shutdown_asyncgens'): yield from loop.shutdown_asyncgens() loop._should_close = True loop.stop() if loop.is_running(): close_loop() elif not loop.is_closed(): loop.run_until_complete(close_loop) loop.close() def enable_gui(gui, kernel=None): """Enable integration with a given GUI""" if gui not in loop_map: e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys()) raise ValueError(e) if kernel is None: if Application.initialized(): kernel = getattr(Application.instance(), 'kernel', None) if kernel is None: raise RuntimeError("You didn't specify a kernel," " and no IPython Application with a kernel appears to be running." ) loop = loop_map[gui] if loop and kernel.eventloop is not None and kernel.eventloop is not loop: raise RuntimeError("Cannot activate multiple GUI eventloops") kernel.eventloop = loop ipykernel-5.2.0/ipykernel/gui/000077500000000000000000000000001363550014400163075ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/gui/__init__.py000066400000000000000000000011041363550014400204140ustar00rootroot00000000000000"""GUI support for the IPython ZeroMQ kernel. This package contains the various toolkit-dependent utilities we use to enable coordination between the IPython kernel and the event loops of the various GUI toolkits. """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team. # # Distributed under the terms of the BSD License. # # The full license is in the file COPYING.txt, distributed as part of this # software. #----------------------------------------------------------------------------- ipykernel-5.2.0/ipykernel/gui/gtk3embed.py000066400000000000000000000062261363550014400205340ustar00rootroot00000000000000"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support. """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # stdlib import sys # Third-party import gi gi.require_version ('Gdk', '3.0') gi.require_version ('Gtk', '3.0') from gi.repository import GObject, Gtk #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class GTKEmbed(object): """A class to embed a kernel into the GTK main event loop. """ def __init__(self, kernel): self.kernel = kernel # These two will later store the real gtk functions when we hijack them self.gtk_main = None self.gtk_main_quit = None def start(self): """Starts the GTK main event loop and sets our kernel startup routine. """ # Register our function to initiate the kernel and start gtk GObject.idle_add(self._wire_kernel) Gtk.main() def _wire_kernel(self): """Initializes the kernel inside GTK. This is meant to run only once at startup, so it does its job and returns False to ensure it doesn't get run again by GTK. """ self.gtk_main, self.gtk_main_quit = self._hijack_gtk() GObject.timeout_add(int(1000*self.kernel._poll_interval), self.iterate_kernel) return False def iterate_kernel(self): """Run one iteration of the kernel and return True. GTK timer functions must return True to be called again, so we make the call to :meth:`do_one_iteration` and then return True for GTK. """ self.kernel.do_one_iteration() return True def stop(self): # FIXME: this one isn't getting called because we have no reliable # kernel shutdown. We need to fix that: once the kernel has a # shutdown mechanism, it can call this. self.gtk_main_quit() sys.exit() def _hijack_gtk(self): """Hijack a few key functions in GTK for IPython integration. Modifies pyGTK's main and main_quit with a dummy so user code does not block IPython. This allows us to use %run to run arbitrary pygtk scripts from a long-lived IPython session, and when they attempt to start or stop Returns ------- The original functions that have been hijacked: - Gtk.main - Gtk.main_quit """ def dummy(*args, **kw): pass # save and trap main and main_quit from gtk orig_main, Gtk.main = Gtk.main, dummy orig_main_quit, Gtk.main_quit = Gtk.main_quit, dummy return orig_main, orig_main_quit ipykernel-5.2.0/ipykernel/gui/gtkembed.py000066400000000000000000000060731363550014400204510ustar00rootroot00000000000000"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support. """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # stdlib import sys # Third-party import gobject import gtk #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class GTKEmbed(object): """A class to embed a kernel into the GTK main event loop. """ def __init__(self, kernel): self.kernel = kernel # These two will later store the real gtk functions when we hijack them self.gtk_main = None self.gtk_main_quit = None def start(self): """Starts the GTK main event loop and sets our kernel startup routine. """ # Register our function to initiate the kernel and start gtk gobject.idle_add(self._wire_kernel) gtk.main() def _wire_kernel(self): """Initializes the kernel inside GTK. This is meant to run only once at startup, so it does its job and returns False to ensure it doesn't get run again by GTK. """ self.gtk_main, self.gtk_main_quit = self._hijack_gtk() gobject.timeout_add(int(1000*self.kernel._poll_interval), self.iterate_kernel) return False def iterate_kernel(self): """Run one iteration of the kernel and return True. GTK timer functions must return True to be called again, so we make the call to :meth:`do_one_iteration` and then return True for GTK. """ self.kernel.do_one_iteration() return True def stop(self): # FIXME: this one isn't getting called because we have no reliable # kernel shutdown. We need to fix that: once the kernel has a # shutdown mechanism, it can call this. self.gtk_main_quit() sys.exit() def _hijack_gtk(self): """Hijack a few key functions in GTK for IPython integration. Modifies pyGTK's main and main_quit with a dummy so user code does not block IPython. This allows us to use %run to run arbitrary pygtk scripts from a long-lived IPython session, and when they attempt to start or stop Returns ------- The original functions that have been hijacked: - gtk.main - gtk.main_quit """ def dummy(*args, **kw): pass # save and trap main and main_quit from gtk orig_main, gtk.main = gtk.main, dummy orig_main_quit, gtk.main_quit = gtk.main_quit, dummy return orig_main, orig_main_quit ipykernel-5.2.0/ipykernel/heartbeat.py000066400000000000000000000101421363550014400200320ustar00rootroot00000000000000"""The client and server for a basic ping-pong style heartbeat. """ #----------------------------------------------------------------------------- # Copyright (C) 2008-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import errno import os import socket from threading import Thread import zmq from jupyter_client.localinterfaces import localhost #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- class Heartbeat(Thread): "A simple ping-pong style heartbeat that runs in a thread." def __init__(self, context, addr=None): if addr is None: addr = ('tcp', localhost(), 0) Thread.__init__(self) self.context = context self.transport, self.ip, self.port = addr self.original_port = self.port if self.original_port == 0: self.pick_port() self.addr = (self.ip, self.port) self.daemon = True def pick_port(self): if self.transport == 'tcp': s = socket.socket() # '*' means all interfaces to 0MQ, which is '' to socket.socket s.bind(('' if self.ip == '*' else self.ip, 0)) self.port = s.getsockname()[1] s.close() elif self.transport == 'ipc': self.port = 1 while os.path.exists("%s-%s" % (self.ip, self.port)): self.port = self.port + 1 else: raise ValueError("Unrecognized zmq transport: %s" % self.transport) return self.port def _try_bind_socket(self): c = ':' if self.transport == 'tcp' else '-' return self.socket.bind('%s://%s' % (self.transport, self.ip) + c + str(self.port)) def _bind_socket(self): try: win_in_use = errno.WSAEADDRINUSE except AttributeError: win_in_use = None # Try up to 100 times to bind a port when in conflict to avoid # infinite attempts in bad setups max_attempts = 1 if self.original_port else 100 for attempt in range(max_attempts): try: self._try_bind_socket() except zmq.ZMQError as ze: if attempt == max_attempts - 1: raise # Raise if we have any error not related to socket binding if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use: raise # Raise if we have any error not related to socket binding if self.original_port == 0: self.pick_port() else: raise else: return def run(self): self.socket = self.context.socket(zmq.ROUTER) self.socket.linger = 1000 try: self._bind_socket() except Exception: self.socket.close() raise while True: try: zmq.device(zmq.QUEUE, self.socket, self.socket) except zmq.ZMQError as e: if e.errno == errno.EINTR: # signal interrupt, resume heartbeat continue elif e.errno == zmq.ETERM: # context terminated, close socket and exit try: self.socket.close() except zmq.ZMQError: # suppress further errors during cleanup # this shouldn't happen, though pass break elif e.errno == zmq.ENOTSOCK: # socket closed elsewhere, exit break else: raise else: break ipykernel-5.2.0/ipykernel/inprocess/000077500000000000000000000000001363550014400175305ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/inprocess/__init__.py000066400000000000000000000003231363550014400216370ustar00rootroot00000000000000from .channels import ( InProcessChannel, InProcessHBChannel, ) from .client import InProcessKernelClient from .manager import InProcessKernelManager from .blocking import BlockingInProcessKernelClient ipykernel-5.2.0/ipykernel/inprocess/blocking.py000066400000000000000000000057741363550014400217070ustar00rootroot00000000000000""" Implements a fully blocking kernel client. Useful for test suites and blocking terminal interfaces. """ #----------------------------------------------------------------------------- # Copyright (C) 2012 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- import sys try: from queue import Queue, Empty # Py 3 except ImportError: from Queue import Queue, Empty # Py 2 # IPython imports from traitlets import Type # Local imports from .channels import ( InProcessChannel, ) from .client import InProcessKernelClient class BlockingInProcessChannel(InProcessChannel): def __init__(self, *args, **kwds): super(BlockingInProcessChannel, self).__init__(*args, **kwds) self._in_queue = Queue() def call_handlers(self, msg): self._in_queue.put(msg) def get_msg(self, block=True, timeout=None): """ Gets a message if there is one that is ready. """ if timeout is None: # Queue.get(timeout=None) has stupid uninteruptible # behavior, so wait for a week instead timeout = 604800 return self._in_queue.get(block, timeout) def get_msgs(self): """ Get all messages that are currently ready. """ msgs = [] while True: try: msgs.append(self.get_msg(block=False)) except Empty: break return msgs def msg_ready(self): """ Is there a message that has been received? """ return not self._in_queue.empty() class BlockingInProcessStdInChannel(BlockingInProcessChannel): def call_handlers(self, msg): """ Overridden for the in-process channel. This methods simply calls raw_input directly. """ msg_type = msg['header']['msg_type'] if msg_type == 'input_request': _raw_input = self.client.kernel._sys_raw_input prompt = msg['content']['prompt'] print(prompt, end='', file=sys.__stdout__) sys.__stdout__.flush() self.client.input(_raw_input()) class BlockingInProcessKernelClient(InProcessKernelClient): # The classes to use for the various channels. shell_channel_class = Type(BlockingInProcessChannel) iopub_channel_class = Type(BlockingInProcessChannel) stdin_channel_class = Type(BlockingInProcessStdInChannel) def wait_for_ready(self): # Wait for kernel info reply on shell channel while True: msg = self.shell_channel.get_msg(block=True) if msg['msg_type'] == 'kernel_info_reply': self._handle_kernel_info_reply(msg) break # Flush IOPub channel while True: try: msg = self.iopub_channel.get_msg(block=True, timeout=0.2) print(msg['msg_type']) except Empty: break ipykernel-5.2.0/ipykernel/inprocess/channels.py000066400000000000000000000051051363550014400216760ustar00rootroot00000000000000"""A kernel client for in-process kernels.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from jupyter_client.channelsabc import HBChannelABC from .socket import DummySocket #----------------------------------------------------------------------------- # Channel classes #----------------------------------------------------------------------------- class InProcessChannel(object): """Base class for in-process channels.""" proxy_methods = [] def __init__(self, client=None): super(InProcessChannel, self).__init__() self.client = client self._is_alive = False def is_alive(self): return self._is_alive def start(self): self._is_alive = True def stop(self): self._is_alive = False def call_handlers(self, msg): """ This method is called in the main thread when a message arrives. Subclasses should override this method to handle incoming messages. """ raise NotImplementedError('call_handlers must be defined in a subclass.') def flush(self, timeout=1.0): pass def call_handlers_later(self, *args, **kwds): """ Call the message handlers later. The default implementation just calls the handlers immediately, but this method exists so that GUI toolkits can defer calling the handlers until after the event loop has run, as expected by GUI frontends. """ self.call_handlers(*args, **kwds) def process_events(self): """ Process any pending GUI events. This method will be never be called from a frontend without an event loop (e.g., a terminal frontend). """ raise NotImplementedError class InProcessHBChannel(object): """A dummy heartbeat channel interface for in-process kernels. Normally we use the heartbeat to check that the kernel process is alive. When the kernel is in-process, that doesn't make sense, but clients still expect this interface. """ time_to_dead = 3.0 def __init__(self, client=None): super(InProcessHBChannel, self).__init__() self.client = client self._is_alive = False self._pause = True def is_alive(self): return self._is_alive def start(self): self._is_alive = True def stop(self): self._is_alive = False def pause(self): self._pause = True def unpause(self): self._pause = False def is_beating(self): return not self._pause HBChannelABC.register(InProcessHBChannel) ipykernel-5.2.0/ipykernel/inprocess/client.py000066400000000000000000000152571363550014400213720ustar00rootroot00000000000000"""A client for in-process kernels.""" #----------------------------------------------------------------------------- # Copyright (C) 2012 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # IPython imports from ipykernel.inprocess.socket import DummySocket from traitlets import Type, Instance, default from jupyter_client.clientabc import KernelClientABC from jupyter_client.client import KernelClient # Local imports from .channels import ( InProcessChannel, InProcessHBChannel, ) #----------------------------------------------------------------------------- # Main kernel Client class #----------------------------------------------------------------------------- class InProcessKernelClient(KernelClient): """A client for an in-process kernel. This class implements the interface of `jupyter_client.clientabc.KernelClientABC` and allows (asynchronous) frontends to be used seamlessly with an in-process kernel. See `jupyter_client.client.KernelClient` for docstrings. """ # The classes to use for the various channels. shell_channel_class = Type(InProcessChannel) iopub_channel_class = Type(InProcessChannel) stdin_channel_class = Type(InProcessChannel) control_channel_class = Type(InProcessChannel) hb_channel_class = Type(InProcessHBChannel) kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel', allow_none=True) #-------------------------------------------------------------------------- # Channel management methods #-------------------------------------------------------------------------- @default('blocking_class') def _default_blocking_class(self): from .blocking import BlockingInProcessKernelClient return BlockingInProcessKernelClient def get_connection_info(self): d = super(InProcessKernelClient, self).get_connection_info() d['kernel'] = self.kernel return d def start_channels(self, *args, **kwargs): super(InProcessKernelClient, self).start_channels() self.kernel.frontends.append(self) @property def shell_channel(self): if self._shell_channel is None: self._shell_channel = self.shell_channel_class(self) return self._shell_channel @property def iopub_channel(self): if self._iopub_channel is None: self._iopub_channel = self.iopub_channel_class(self) return self._iopub_channel @property def stdin_channel(self): if self._stdin_channel is None: self._stdin_channel = self.stdin_channel_class(self) return self._stdin_channel @property def control_channel(self): if self._control_channel is None: self._control_channel = self.control_channel_class(self) return self._control_channel @property def hb_channel(self): if self._hb_channel is None: self._hb_channel = self.hb_channel_class(self) return self._hb_channel # Methods for sending specific messages # ------------------------------------- def execute(self, code, silent=False, store_history=True, user_expressions={}, allow_stdin=None): if allow_stdin is None: allow_stdin = self.allow_stdin content = dict(code=code, silent=silent, store_history=store_history, user_expressions=user_expressions, allow_stdin=allow_stdin) msg = self.session.msg('execute_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def complete(self, code, cursor_pos=None): if cursor_pos is None: cursor_pos = len(code) content = dict(code=code, cursor_pos=cursor_pos) msg = self.session.msg('complete_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def inspect(self, code, cursor_pos=None, detail_level=0): if cursor_pos is None: cursor_pos = len(code) content = dict(code=code, cursor_pos=cursor_pos, detail_level=detail_level, ) msg = self.session.msg('inspect_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def history(self, raw=True, output=False, hist_access_type='range', **kwds): content = dict(raw=raw, output=output, hist_access_type=hist_access_type, **kwds) msg = self.session.msg('history_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def shutdown(self, restart=False): # FIXME: What to do here? raise NotImplementedError('Cannot shutdown in-process kernel') def kernel_info(self): """Request kernel info.""" msg = self.session.msg('kernel_info_request') self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def comm_info(self, target_name=None): """Request a dictionary of valid comms and their targets.""" if target_name is None: content = {} else: content = dict(target_name=target_name) msg = self.session.msg('comm_info_request', content) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def input(self, string): if self.kernel is None: raise RuntimeError('Cannot send input reply. No kernel exists.') self.kernel.raw_input_str = string def is_complete(self, code): msg = self.session.msg('is_complete_request', {'code': code}) self._dispatch_to_kernel(msg) return msg['header']['msg_id'] def _dispatch_to_kernel(self, msg): """ Send a message to the kernel and handle a reply. """ kernel = self.kernel if kernel is None: raise RuntimeError('Cannot send request. No kernel exists.') stream = DummySocket() self.session.send(stream, msg) msg_parts = stream.recv_multipart() kernel.dispatch_shell(stream, msg_parts) idents, reply_msg = self.session.recv(stream, copy=False) self.shell_channel.call_handlers_later(reply_msg) #----------------------------------------------------------------------------- # ABC Registration #----------------------------------------------------------------------------- KernelClientABC.register(InProcessKernelClient) ipykernel-5.2.0/ipykernel/inprocess/constants.py000066400000000000000000000004571363550014400221240ustar00rootroot00000000000000"""Shared constants. """ # Because inprocess communication is not networked, we can use a common Session # key everywhere. This is not just the empty bytestring to avoid tripping # certain security checks in the rest of Jupyter that assumes that empty keys # are insecure. INPROCESS_KEY = b'inprocess' ipykernel-5.2.0/ipykernel/inprocess/ipkernel.py000066400000000000000000000153151363550014400217200ustar00rootroot00000000000000"""An in-process kernel""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from contextlib import contextmanager import logging import sys from IPython.core.interactiveshell import InteractiveShellABC from ipykernel.jsonutil import json_clean from traitlets import Any, Enum, Instance, List, Type, default from ipykernel.ipkernel import IPythonKernel from ipykernel.zmqshell import ZMQInteractiveShell from .constants import INPROCESS_KEY from .socket import DummySocket from ..iostream import OutStream, BackgroundSocket, IOPubThread #----------------------------------------------------------------------------- # Main kernel class #----------------------------------------------------------------------------- class InProcessKernel(IPythonKernel): #------------------------------------------------------------------------- # InProcessKernel interface #------------------------------------------------------------------------- # The frontends connected to this kernel. frontends = List( Instance('ipykernel.inprocess.client.InProcessKernelClient', allow_none=True) ) # The GUI environment that the kernel is running under. This need not be # specified for the normal operation for the kernel, but is required for # IPython's GUI support (including pylab). The default is 'inline' because # it is safe under all GUI toolkits. gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'), default_value='inline') raw_input_str = Any() stdout = Any() stderr = Any() #------------------------------------------------------------------------- # Kernel interface #------------------------------------------------------------------------- shell_class = Type(allow_none=True) shell_streams = List() control_stream = Any() _underlying_iopub_socket = Instance(DummySocket, ()) iopub_thread = Instance(IOPubThread) @default('iopub_thread') def _default_iopub_thread(self): thread = IOPubThread(self._underlying_iopub_socket) thread.start() return thread iopub_socket = Instance(BackgroundSocket) @default('iopub_socket') def _default_iopub_socket(self): return self.iopub_thread.background_socket stdin_socket = Instance(DummySocket, ()) def __init__(self, **traits): super(InProcessKernel, self).__init__(**traits) self._underlying_iopub_socket.observe(self._io_dispatch, names=['message_sent']) self.shell.kernel = self def execute_request(self, stream, ident, parent): """ Override for temporary IO redirection. """ with self._redirected_io(): super(InProcessKernel, self).execute_request(stream, ident, parent) def start(self): """ Override registration of dispatchers for streams. """ self.shell.exit_now = False def _abort_queues(self): """ The in-process kernel doesn't abort requests. """ pass def _input_request(self, prompt, ident, parent, password=False): # Flush output before making the request. self.raw_input_str = None sys.stderr.flush() sys.stdout.flush() # Send the input request. content = json_clean(dict(prompt=prompt, password=password)) msg = self.session.msg(u'input_request', content, parent) for frontend in self.frontends: if frontend.session.session == parent['header']['session']: frontend.stdin_channel.call_handlers(msg) break else: logging.error('No frontend found for raw_input request') return str() # Await a response. while self.raw_input_str is None: frontend.stdin_channel.process_events() return self.raw_input_str #------------------------------------------------------------------------- # Protected interface #------------------------------------------------------------------------- @contextmanager def _redirected_io(self): """ Temporarily redirect IO to the kernel. """ sys_stdout, sys_stderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = self.stdout, self.stderr yield sys.stdout, sys.stderr = sys_stdout, sys_stderr #------ Trait change handlers -------------------------------------------- def _io_dispatch(self, change): """ Called when a message is sent to the IO socket. """ ident, msg = self.session.recv(self.iopub_socket, copy=False) for frontend in self.frontends: frontend.iopub_channel.call_handlers(msg) #------ Trait initializers ----------------------------------------------- @default('log') def _default_log(self): return logging.getLogger(__name__) @default('session') def _default_session(self): from jupyter_client.session import Session return Session(parent=self, key=INPROCESS_KEY) @default('shell_class') def _default_shell_class(self): return InProcessInteractiveShell @default('stdout') def _default_stdout(self): return OutStream(self.session, self.iopub_thread, u'stdout') @default('stderr') def _default_stderr(self): return OutStream(self.session, self.iopub_thread, u'stderr') #----------------------------------------------------------------------------- # Interactive shell subclass #----------------------------------------------------------------------------- class InProcessInteractiveShell(ZMQInteractiveShell): kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel', allow_none=True) #------------------------------------------------------------------------- # InteractiveShell interface #------------------------------------------------------------------------- def enable_gui(self, gui=None): """Enable GUI integration for the kernel.""" from ipykernel.eventloops import enable_gui if not gui: gui = self.kernel.gui enable_gui(gui, kernel=self.kernel) self.active_eventloop = gui def enable_matplotlib(self, gui=None): """Enable matplotlib integration for the kernel.""" if not gui: gui = self.kernel.gui return super(InProcessInteractiveShell, self).enable_matplotlib(gui) def enable_pylab(self, gui=None, import_all=True, welcome_message=False): """Activate pylab support at runtime.""" if not gui: gui = self.kernel.gui return super(InProcessInteractiveShell, self).enable_pylab(gui, import_all, welcome_message) InteractiveShellABC.register(InProcessInteractiveShell) ipykernel-5.2.0/ipykernel/inprocess/manager.py000066400000000000000000000053141363550014400215170ustar00rootroot00000000000000"""A kernel manager for in-process kernels.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from traitlets import Instance, DottedObjectName, default from jupyter_client.managerabc import KernelManagerABC from jupyter_client.manager import KernelManager from jupyter_client.session import Session from .constants import INPROCESS_KEY class InProcessKernelManager(KernelManager): """A manager for an in-process kernel. This class implements the interface of `jupyter_client.kernelmanagerabc.KernelManagerABC` and allows (asynchronous) frontends to be used seamlessly with an in-process kernel. See `jupyter_client.kernelmanager.KernelManager` for docstrings. """ # The kernel process with which the KernelManager is communicating. kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel', allow_none=True) # the client class for KM.client() shortcut client_class = DottedObjectName('ipykernel.inprocess.BlockingInProcessKernelClient') @default('blocking_class') def _default_blocking_class(self): from .blocking import BlockingInProcessKernelClient return BlockingInProcessKernelClient @default('session') def _default_session(self): # don't sign in-process messages return Session(key=INPROCESS_KEY, parent=self) #-------------------------------------------------------------------------- # Kernel management methods #-------------------------------------------------------------------------- def start_kernel(self, **kwds): from ipykernel.inprocess.ipkernel import InProcessKernel self.kernel = InProcessKernel(parent=self, session=self.session) def shutdown_kernel(self): self.kernel.iopub_thread.stop() self._kill_kernel() def restart_kernel(self, now=False, **kwds): self.shutdown_kernel() self.start_kernel(**kwds) @property def has_kernel(self): return self.kernel is not None def _kill_kernel(self): self.kernel = None def interrupt_kernel(self): raise NotImplementedError("Cannot interrupt in-process kernel.") def signal_kernel(self, signum): raise NotImplementedError("Cannot signal in-process kernel.") def is_alive(self): return self.kernel is not None def client(self, **kwargs): kwargs['kernel'] = self.kernel return super(InProcessKernelManager, self).client(**kwargs) #----------------------------------------------------------------------------- # ABC Registration #----------------------------------------------------------------------------- KernelManagerABC.register(InProcessKernelManager) ipykernel-5.2.0/ipykernel/inprocess/socket.py000066400000000000000000000043361363550014400214000ustar00rootroot00000000000000""" Defines a dummy socket implementing (part of) the zmq.Socket interface. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import abc import warnings try: from queue import Queue # Py 3 except ImportError: from Queue import Queue # Py 2 import zmq from traitlets import HasTraits, Instance, Int from ipython_genutils.py3compat import with_metaclass #----------------------------------------------------------------------------- # Generic socket interface #----------------------------------------------------------------------------- class SocketABC(with_metaclass(abc.ABCMeta, object)): @abc.abstractmethod def recv_multipart(self, flags=0, copy=True, track=False): raise NotImplementedError @abc.abstractmethod def send_multipart(self, msg_parts, flags=0, copy=True, track=False): raise NotImplementedError @classmethod def register(cls, other_cls): if other_cls is not DummySocket: warnings.warn("SocketABC is deprecated since ipykernel version 4.5.0.", DeprecationWarning, stacklevel=2) abc.ABCMeta.register(cls, other_cls) #----------------------------------------------------------------------------- # Dummy socket class #----------------------------------------------------------------------------- class DummySocket(HasTraits): """ A dummy socket implementing (part of) the zmq.Socket interface. """ queue = Instance(Queue, ()) message_sent = Int(0) # Should be an Event context = Instance(zmq.Context) def _context_default(self): return zmq.Context() #------------------------------------------------------------------------- # Socket interface #------------------------------------------------------------------------- def recv_multipart(self, flags=0, copy=True, track=False): return self.queue.get_nowait() def send_multipart(self, msg_parts, flags=0, copy=True, track=False): msg_parts = list(map(zmq.Message, msg_parts)) self.queue.put_nowait(msg_parts) self.message_sent += 1 def flush(self, timeout=1.0): """no-op to comply with stream API""" pass SocketABC.register(DummySocket) ipykernel-5.2.0/ipykernel/inprocess/tests/000077500000000000000000000000001363550014400206725ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/inprocess/tests/__init__.py000066400000000000000000000000001363550014400227710ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/inprocess/tests/test_kernel.py000066400000000000000000000070621363550014400235700ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import sys import unittest from ipykernel.inprocess.blocking import BlockingInProcessKernelClient from ipykernel.inprocess.manager import InProcessKernelManager from ipykernel.inprocess.ipkernel import InProcessKernel from ipykernel.tests.utils import assemble_output from IPython.testing.decorators import skipif_not_matplotlib from IPython.utils.io import capture_output from ipython_genutils import py3compat if py3compat.PY3: from io import StringIO else: from StringIO import StringIO def _init_asyncio_patch(): """set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if sys.platform.startswith("win") and sys.version_info >= (3, 8): import asyncio try: from asyncio import ( WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) class InProcessKernelTestCase(unittest.TestCase): def setUp(self): _init_asyncio_patch() self.km = InProcessKernelManager() self.km.start_kernel() self.kc = self.km.client() self.kc.start_channels() self.kc.wait_for_ready() @skipif_not_matplotlib def test_pylab(self): """Does %pylab work in the in-process kernel?""" kc = self.kc kc.execute('%pylab') out, err = assemble_output(kc.iopub_channel) self.assertIn('matplotlib', out) def test_raw_input(self): """ Does the in-process kernel handle raw_input correctly? """ io = StringIO('foobar\n') sys_stdin = sys.stdin sys.stdin = io try: if py3compat.PY3: self.kc.execute('x = input()') else: self.kc.execute('x = raw_input()') finally: sys.stdin = sys_stdin assert self.km.kernel.shell.user_ns.get('x') == 'foobar' def test_stdout(self): """ Does the in-process kernel correctly capture IO? """ kernel = InProcessKernel() with capture_output() as io: kernel.shell.run_cell('print("foo")') assert io.stdout == 'foo\n' kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session) kernel.frontends.append(kc) kc.execute('print("bar")') out, err = assemble_output(kc.iopub_channel) assert out == 'bar\n' def test_getpass_stream(self): "Tests that kernel getpass accept the stream parameter" kernel = InProcessKernel() kernel._allow_stdin = True kernel._input_request = lambda *args, **kwargs : None kernel.getpass(stream='non empty') ipykernel-5.2.0/ipykernel/inprocess/tests/test_kernelmanager.py000066400000000000000000000066071363550014400251270ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import unittest from ipykernel.inprocess.blocking import BlockingInProcessKernelClient from ipykernel.inprocess.manager import InProcessKernelManager #----------------------------------------------------------------------------- # Test case #----------------------------------------------------------------------------- class InProcessKernelManagerTestCase(unittest.TestCase): def setUp(self): self.km = InProcessKernelManager() def tearDown(self): if self.km.has_kernel: self.km.shutdown_kernel() def test_interface(self): """ Does the in-process kernel manager implement the basic KM interface? """ km = self.km assert not km.has_kernel km.start_kernel() assert km.has_kernel assert km.kernel is not None kc = km.client() assert not kc.channels_running kc.start_channels() assert kc.channels_running old_kernel = km.kernel km.restart_kernel() self.assertIsNotNone(km.kernel) assert km.kernel != old_kernel km.shutdown_kernel() assert not km.has_kernel self.assertRaises(NotImplementedError, km.interrupt_kernel) self.assertRaises(NotImplementedError, km.signal_kernel, 9) kc.stop_channels() assert not kc.channels_running def test_execute(self): """ Does executing code in an in-process kernel work? """ km = self.km km.start_kernel() kc = km.client() kc.start_channels() kc.wait_for_ready() kc.execute('foo = 1') assert km.kernel.shell.user_ns['foo'] == 1 def test_complete(self): """ Does requesting completion from an in-process kernel work? """ km = self.km km.start_kernel() kc = km.client() kc.start_channels() kc.wait_for_ready() km.kernel.shell.push({'my_bar': 0, 'my_baz': 1}) kc.complete('my_ba', 5) msg = kc.get_shell_msg() assert msg['header']['msg_type'] == 'complete_reply' self.assertEqual(sorted(msg['content']['matches']), ['my_bar', 'my_baz']) def test_inspect(self): """ Does requesting object information from an in-process kernel work? """ km = self.km km.start_kernel() kc = km.client() kc.start_channels() kc.wait_for_ready() km.kernel.shell.user_ns['foo'] = 1 kc.inspect('foo') msg = kc.get_shell_msg() assert msg['header']['msg_type'] == 'inspect_reply' content = msg['content'] assert content['found'] text = content['data']['text/plain'] self.assertIn('int', text) def test_history(self): """ Does requesting history from an in-process kernel work? """ km = self.km km.start_kernel() kc = km.client() kc.start_channels() kc.wait_for_ready() kc.execute('1') kc.history(hist_access_type='tail', n=1) msg = kc.shell_channel.get_msgs()[-1] assert msg['header']['msg_type'] == 'history_reply' history = msg['content']['history'] assert len(history) == 1 assert history[0][2] == '1' if __name__ == '__main__': unittest.main() ipykernel-5.2.0/ipykernel/iostream.py000066400000000000000000000356111363550014400177260ustar00rootroot00000000000000# coding: utf-8 """Wrappers for forwarding stdout/stderr over zmq""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import atexit from binascii import b2a_hex from collections import deque try: from importlib import lock_held as import_lock_held except ImportError: from imp import lock_held as import_lock_held import os import sys import threading import warnings from io import StringIO, TextIOBase import zmq from zmq.eventloop.ioloop import IOLoop from zmq.eventloop.zmqstream import ZMQStream from jupyter_client.session import extract_header from ipython_genutils import py3compat from ipython_genutils.py3compat import unicode_type #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- MASTER = 0 CHILD = 1 #----------------------------------------------------------------------------- # IO classes #----------------------------------------------------------------------------- class IOPubThread(object): """An object for sending IOPub messages in a background thread Prevents a blocking main thread from delaying output from threads. IOPubThread(pub_socket).background_socket is a Socket-API-providing object whose IO is always run in a thread. """ def __init__(self, socket, pipe=False): """Create IOPub thread Parameters ---------- socket: zmq.PUB Socket the socket on which messages will be sent. pipe: bool Whether this process should listen for IOPub messages piped from subprocesses. """ self.socket = socket self.background_socket = BackgroundSocket(self) self._master_pid = os.getpid() self._pipe_flag = pipe self.io_loop = IOLoop(make_current=False) if pipe: self._setup_pipe_in() self._local = threading.local() self._events = deque() self._setup_event_pipe() self.thread = threading.Thread(target=self._thread_main) self.thread.daemon = True def _thread_main(self): """The inner loop that's actually run in a thread""" self.io_loop.make_current() self.io_loop.start() self.io_loop.close(all_fds=True) def _setup_event_pipe(self): """Create the PULL socket listening for events that should fire in this thread.""" ctx = self.socket.context pipe_in = ctx.socket(zmq.PULL) pipe_in.linger = 0 _uuid = b2a_hex(os.urandom(16)).decode('ascii') iface = self._event_interface = 'inproc://%s' % _uuid pipe_in.bind(iface) self._event_puller = ZMQStream(pipe_in, self.io_loop) self._event_puller.on_recv(self._handle_event) @property def _event_pipe(self): """thread-local event pipe for signaling events that should be processed in the thread""" try: event_pipe = self._local.event_pipe except AttributeError: # new thread, new event pipe ctx = self.socket.context event_pipe = ctx.socket(zmq.PUSH) event_pipe.linger = 0 event_pipe.connect(self._event_interface) self._local.event_pipe = event_pipe return event_pipe def _handle_event(self, msg): """Handle an event on the event pipe Content of the message is ignored. Whenever *an* event arrives on the event stream, *all* waiting events are processed in order. """ # freeze event count so new writes don't extend the queue # while we are processing n_events = len(self._events) for i in range(n_events): event_f = self._events.popleft() event_f() def _setup_pipe_in(self): """setup listening pipe for IOPub from forked subprocesses""" ctx = self.socket.context # use UUID to authenticate pipe messages self._pipe_uuid = os.urandom(16) pipe_in = ctx.socket(zmq.PULL) pipe_in.linger = 0 try: self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1") except zmq.ZMQError as e: warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e + "\nsubprocess output will be unavailable." ) self._pipe_flag = False pipe_in.close() return self._pipe_in = ZMQStream(pipe_in, self.io_loop) self._pipe_in.on_recv(self._handle_pipe_msg) def _handle_pipe_msg(self, msg): """handle a pipe message from a subprocess""" if not self._pipe_flag or not self._is_master_process(): return if msg[0] != self._pipe_uuid: print("Bad pipe message: %s", msg, file=sys.__stderr__) return self.send_multipart(msg[1:]) def _setup_pipe_out(self): # must be new context after fork ctx = zmq.Context() pipe_out = ctx.socket(zmq.PUSH) pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port) return ctx, pipe_out def _is_master_process(self): return os.getpid() == self._master_pid def _check_mp_mode(self): """check for forks, and switch to zmq pipeline if necessary""" if not self._pipe_flag or self._is_master_process(): return MASTER else: return CHILD def start(self): """Start the IOPub thread""" self.thread.start() # make sure we don't prevent process exit # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be. atexit.register(self.stop) def stop(self): """Stop the IOPub thread""" if not self.thread.is_alive(): return self.io_loop.add_callback(self.io_loop.stop) self.thread.join() if hasattr(self._local, 'event_pipe'): self._local.event_pipe.close() def close(self): if self.closed: return self.socket.close() self.socket = None @property def closed(self): return self.socket is None def schedule(self, f): """Schedule a function to be called in our IO thread. If the thread is not running, call immediately. """ if self.thread.is_alive(): self._events.append(f) # wake event thread (message content is ignored) self._event_pipe.send(b'') else: f() def send_multipart(self, *args, **kwargs): """send_multipart schedules actual zmq send in my thread. If my thread isn't running (e.g. forked process), send immediately. """ self.schedule(lambda : self._really_send(*args, **kwargs)) def _really_send(self, msg, *args, **kwargs): """The callback that actually sends messages""" mp_mode = self._check_mp_mode() if mp_mode != CHILD: # we are master, do a regular send self.socket.send_multipart(msg, *args, **kwargs) else: # we are a child, pipe to master # new context/socket for every pipe-out # since forks don't teardown politely, use ctx.term to ensure send has completed ctx, pipe_out = self._setup_pipe_out() pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs) pipe_out.close() ctx.term() class BackgroundSocket(object): """Wrapper around IOPub thread that provides zmq send[_multipart]""" io_thread = None def __init__(self, io_thread): self.io_thread = io_thread def __getattr__(self, attr): """Wrap socket attr access for backward-compatibility""" if attr.startswith('__') and attr.endswith('__'): # don't wrap magic methods super(BackgroundSocket, self).__getattr__(attr) if hasattr(self.io_thread.socket, attr): warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr, DeprecationWarning, stacklevel=2) return getattr(self.io_thread.socket, attr) super(BackgroundSocket, self).__getattr__(attr) def __setattr__(self, attr, value): if attr == 'io_thread' or (attr.startswith('__' and attr.endswith('__'))): super(BackgroundSocket, self).__setattr__(attr, value) else: warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr, DeprecationWarning, stacklevel=2) setattr(self.io_thread.socket, attr, value) def send(self, msg, *args, **kwargs): return self.send_multipart([msg], *args, **kwargs) def send_multipart(self, *args, **kwargs): """Schedule send in IO thread""" return self.io_thread.send_multipart(*args, **kwargs) class OutStream(TextIOBase): """A file like object that publishes the stream to a 0MQ PUB socket. Output is handed off to an IO Thread """ # timeout for flush to avoid infinite hang # in case of misbehavior flush_timeout = 10 # The time interval between automatic flushes, in seconds. flush_interval = 0.2 topic = None encoding = 'UTF-8' def __init__(self, session, pub_thread, name, pipe=None, echo=None): if pipe is not None: warnings.warn("pipe argument to OutStream is deprecated and ignored", DeprecationWarning) # This is necessary for compatibility with Python built-in streams self.session = session if not isinstance(pub_thread, IOPubThread): # Backward-compat: given socket, not thread. Wrap in a thread. warnings.warn("OutStream should be created with IOPubThread, not %r" % pub_thread, DeprecationWarning, stacklevel=2) pub_thread = IOPubThread(pub_thread) pub_thread.start() self.pub_thread = pub_thread self.name = name self.topic = b'stream.' + py3compat.cast_bytes(name) self.parent_header = {} self._master_pid = os.getpid() self._flush_pending = False self._io_loop = pub_thread.io_loop self._new_buffer() self.echo = None if echo: if hasattr(echo, 'read') and hasattr(echo, 'write'): self.echo = echo else: raise ValueError("echo argument must be a file like object") def _is_master_process(self): return os.getpid() == self._master_pid def set_parent(self, parent): self.parent_header = extract_header(parent) def close(self): self.pub_thread = None @property def closed(self): return self.pub_thread is None def _schedule_flush(self): """schedule a flush in the IO thread call this on write, to indicate that flush should be called soon. """ if self._flush_pending: return self._flush_pending = True # add_timeout has to be handed to the io thread via event pipe def _schedule_in_thread(): self._io_loop.call_later(self.flush_interval, self._flush) self.pub_thread.schedule(_schedule_in_thread) def flush(self): """trigger actual zmq send send will happen in the background thread """ if self.pub_thread and self.pub_thread.thread is not None and self.pub_thread.thread.is_alive(): # request flush on the background thread self.pub_thread.schedule(self._flush) # wait for flush to actually get through, if we can. # waiting across threads during import can cause deadlocks # so only wait if import lock is not held if not import_lock_held(): evt = threading.Event() self.pub_thread.schedule(evt.set) # and give a timeout to avoid if not evt.wait(self.flush_timeout): # write directly to __stderr__ instead of warning because # if this is happening sys.stderr may be the problem. print("IOStream.flush timed out", file=sys.__stderr__) else: self._flush() def _flush(self): """This is where the actual send happens. _flush should generally be called in the IO thread, unless the thread has been destroyed (e.g. forked subprocess). """ self._flush_pending = False if self.echo is not None: try: self.echo.flush() except OSError as e: if self.echo is not sys.__stderr__: print("Flush failed: {}".format(e), file=sys.__stderr__) data = self._flush_buffer() if data: # FIXME: this disables Session's fork-safe check, # since pub_thread is itself fork-safe. # There should be a better way to do this. self.session.pid = os.getpid() content = {u'name':self.name, u'text':data} self.session.send(self.pub_thread, u'stream', content=content, parent=self.parent_header, ident=self.topic) def write(self, string): if self.echo is not None: try: self.echo.write(string) except OSError as e: if self.echo is not sys.__stderr__: print("Write failed: {}".format(e), file=sys.__stderr__) if self.pub_thread is None: raise ValueError('I/O operation on closed file') else: # Make sure that we're handling unicode if not isinstance(string, unicode_type): string = string.decode(self.encoding, 'replace') is_child = (not self._is_master_process()) # only touch the buffer in the IO thread to avoid races self.pub_thread.schedule(lambda : self._buffer.write(string)) if is_child: # newlines imply flush in subprocesses # mp.Pool cannot be trusted to flush promptly (or ever), # and this helps. if '\n' in string: self.flush() else: self._schedule_flush() def writelines(self, sequence): if self.pub_thread is None: raise ValueError('I/O operation on closed file') else: for string in sequence: self.write(string) def writable(self): return True def _flush_buffer(self): """clear the current buffer and return the current buffer data. This should only be called in the IO thread. """ data = u'' if self._buffer is not None: buf = self._buffer self._new_buffer() data = buf.getvalue() buf.close() return data def _new_buffer(self): self._buffer = StringIO() ipykernel-5.2.0/ipykernel/ipkernel.py000066400000000000000000000452111363550014400177110ustar00rootroot00000000000000"""The IPython kernel implementation""" import asyncio from contextlib import contextmanager from functools import partial import getpass import signal import sys from IPython.core import release from ipython_genutils.py3compat import builtin_mod, PY3, unicode_type, safe_unicode from IPython.utils.tokenutil import token_at_cursor, line_at_cursor from tornado import gen from traitlets import Instance, Type, Any, List, Bool from .comm import CommManager from .kernelbase import Kernel as KernelBase from .zmqshell import ZMQInteractiveShell from .eventloops import _use_appnope try: from IPython.core.interactiveshell import _asyncio_runner except ImportError: _asyncio_runner = None try: from IPython.core.completer import rectify_completions as _rectify_completions, provisionalcompleter as _provisionalcompleter _use_experimental_60_completion = True except ImportError: _use_experimental_60_completion = False _EXPERIMENTAL_KEY_NAME = '_jupyter_types_experimental' class IPythonKernel(KernelBase): shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True) shell_class = Type(ZMQInteractiveShell) use_experimental_completions = Bool(True, help="Set this flag to False to deactivate the use of experimental IPython completion APIs.", ).tag(config=True) user_module = Any() def _user_module_changed(self, name, old, new): if self.shell is not None: self.shell.user_module = new user_ns = Instance(dict, args=None, allow_none=True) def _user_ns_changed(self, name, old, new): if self.shell is not None: self.shell.user_ns = new self.shell.init_user_ns() # A reference to the Python builtin 'raw_input' function. # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3) _sys_raw_input = Any() _sys_eval_input = Any() def __init__(self, **kwargs): super(IPythonKernel, self).__init__(**kwargs) # Initialize the InteractiveShell subclass self.shell = self.shell_class.instance(parent=self, profile_dir = self.profile_dir, user_module = self.user_module, user_ns = self.user_ns, kernel = self, ) self.shell.displayhook.session = self.session self.shell.displayhook.pub_socket = self.iopub_socket self.shell.displayhook.topic = self._topic('execute_result') self.shell.display_pub.session = self.session self.shell.display_pub.pub_socket = self.iopub_socket self.comm_manager = CommManager(parent=self, kernel=self) self.shell.configurables.append(self.comm_manager) comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ] for msg_type in comm_msg_types: self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type) if _use_appnope() and self._darwin_app_nap: # Disable app-nap as the kernel is not a gui but can have guis import appnope appnope.nope() help_links = List([ { 'text': "Python Reference", 'url': "https://docs.python.org/%i.%i" % sys.version_info[:2], }, { 'text': "IPython Reference", 'url': "https://ipython.org/documentation.html", }, { 'text': "NumPy Reference", 'url': "https://docs.scipy.org/doc/numpy/reference/", }, { 'text': "SciPy Reference", 'url': "https://docs.scipy.org/doc/scipy/reference/", }, { 'text': "Matplotlib Reference", 'url': "https://matplotlib.org/contents.html", }, { 'text': "SymPy Reference", 'url': "http://docs.sympy.org/latest/index.html", }, { 'text': "pandas Reference", 'url': "https://pandas.pydata.org/pandas-docs/stable/", }, ]).tag(config=True) # Kernel info fields implementation = 'ipython' implementation_version = release.version language_info = { 'name': 'python', 'version': sys.version.split()[0], 'mimetype': 'text/x-python', 'codemirror_mode': { 'name': 'ipython', 'version': sys.version_info[0] }, 'pygments_lexer': 'ipython%d' % (3 if PY3 else 2), 'nbconvert_exporter': 'python', 'file_extension': '.py' } @property def banner(self): return self.shell.banner def start(self): self.shell.exit_now = False super(IPythonKernel, self).start() def set_parent(self, ident, parent): """Overridden from parent to tell the display hook and output streams about the parent message. """ super(IPythonKernel, self).set_parent(ident, parent) self.shell.set_parent(parent) def init_metadata(self, parent): """Initialize metadata. Run at the beginning of each execution request. """ md = super(IPythonKernel, self).init_metadata(parent) # FIXME: remove deprecated ipyparallel-specific code # This is required for ipyparallel < 5.0 md.update({ 'dependencies_met' : True, 'engine' : self.ident, }) return md def finish_metadata(self, parent, metadata, reply_content): """Finish populating metadata. Run after completing an execution request. """ # FIXME: remove deprecated ipyparallel-specific code # This is required by ipyparallel < 5.0 metadata['status'] = reply_content['status'] if reply_content['status'] == 'error' and reply_content['ename'] == 'UnmetDependency': metadata['dependencies_met'] = False return metadata def _forward_input(self, allow_stdin=False): """Forward raw_input and getpass to the current frontend. via input_request """ self._allow_stdin = allow_stdin if PY3: self._sys_raw_input = builtin_mod.input builtin_mod.input = self.raw_input else: self._sys_raw_input = builtin_mod.raw_input self._sys_eval_input = builtin_mod.input builtin_mod.raw_input = self.raw_input builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt)) self._save_getpass = getpass.getpass getpass.getpass = self.getpass def _restore_input(self): """Restore raw_input, getpass""" if PY3: builtin_mod.input = self._sys_raw_input else: builtin_mod.raw_input = self._sys_raw_input builtin_mod.input = self._sys_eval_input getpass.getpass = self._save_getpass @property def execution_count(self): return self.shell.execution_count @execution_count.setter def execution_count(self, value): # Ignore the incrementing done by KernelBase, in favour of our shell's # execution counter. pass @contextmanager def _cancel_on_sigint(self, future): """ContextManager for capturing SIGINT and cancelling a future SIGINT raises in the event loop when running async code, but we want it to halt a coroutine. Ideally, it would raise KeyboardInterrupt, but this turns it into a CancelledError. At least it gets a decent traceback to the user. """ sigint_future = asyncio.Future() # whichever future finishes first, # cancel the other one def cancel_unless_done(f, _ignored): if f.cancelled() or f.done(): return f.cancel() # when sigint finishes, # abort the coroutine with CancelledError sigint_future.add_done_callback( partial(cancel_unless_done, future) ) # when the main future finishes, # stop watching for SIGINT events future.add_done_callback( partial(cancel_unless_done, sigint_future) ) def handle_sigint(*args): def set_sigint_result(): if sigint_future.cancelled() or sigint_future.done(): return sigint_future.set_result(1) # use add_callback for thread safety self.io_loop.add_callback(set_sigint_result) # set the custom sigint hander during this context save_sigint = signal.signal(signal.SIGINT, handle_sigint) try: yield finally: # restore the previous sigint handler signal.signal(signal.SIGINT, save_sigint) @gen.coroutine def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): shell = self.shell # we'll need this a lot here self._forward_input(allow_stdin) reply_content = {} if hasattr(shell, 'run_cell_async') and hasattr(shell, 'should_run_async'): run_cell = shell.run_cell_async should_run_async = shell.should_run_async else: should_run_async = lambda cell: False # older IPython, # use blocking run_cell and wrap it in coroutine @gen.coroutine def run_cell(*args, **kwargs): return shell.run_cell(*args, **kwargs) try: # default case: runner is asyncio and asyncio is already running # TODO: this should check every case for "are we inside the runner", # not just asyncio if ( _asyncio_runner and should_run_async(code) and shell.loop_runner is _asyncio_runner and asyncio.get_event_loop().is_running() ): coro = run_cell(code, store_history=store_history, silent=silent) coro_future = asyncio.ensure_future(coro) with self._cancel_on_sigint(coro_future): res = yield coro_future else: # runner isn't already running, # make synchronous call, # letting shell dispatch to loop runners res = shell.run_cell(code, store_history=store_history, silent=silent) finally: self._restore_input() if res.error_before_exec is not None: err = res.error_before_exec else: err = res.error_in_exec if res.success: reply_content[u'status'] = u'ok' else: reply_content[u'status'] = u'error' reply_content.update({ u'traceback': shell._last_traceback or [], u'ename': unicode_type(type(err).__name__), u'evalue': safe_unicode(err), }) # FIXME: deprecated piece for ipyparallel (remove in 5.0): e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='execute') reply_content['engine_info'] = e_info # Return the execution counter so clients can display prompts reply_content['execution_count'] = shell.execution_count - 1 if 'traceback' in reply_content: self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback'])) # At this point, we can tell whether the main code execution succeeded # or not. If it did, we proceed to evaluate user_expressions if reply_content['status'] == 'ok': reply_content[u'user_expressions'] = \ shell.user_expressions(user_expressions or {}) else: # If there was an error, don't even try to compute expressions reply_content[u'user_expressions'] = {} # Payloads should be retrieved regardless of outcome, so we can both # recover partial output (that could have been generated early in a # block, before an error) and always clear the payload system. reply_content[u'payload'] = shell.payload_manager.read_payload() # Be aggressive about clearing the payload because we don't want # it to sit in memory until the next execute_request comes in. shell.payload_manager.clear_payload() return reply_content def do_complete(self, code, cursor_pos): if _use_experimental_60_completion and self.use_experimental_completions: return self._experimental_do_complete(code, cursor_pos) # FIXME: IPython completers currently assume single line, # but completion messages give multi-line context # For now, extract line from cell, based on cursor_pos: if cursor_pos is None: cursor_pos = len(code) line, offset = line_at_cursor(code, cursor_pos) line_cursor = cursor_pos - offset txt, matches = self.shell.complete('', line, line_cursor) return {'matches' : matches, 'cursor_end' : cursor_pos, 'cursor_start' : cursor_pos - len(txt), 'metadata' : {}, 'status' : 'ok'} def _experimental_do_complete(self, code, cursor_pos): """ Experimental completions from IPython, using Jedi. """ if cursor_pos is None: cursor_pos = len(code) with _provisionalcompleter(): raw_completions = self.shell.Completer.completions(code, cursor_pos) completions = list(_rectify_completions(code, raw_completions)) comps = [] for comp in completions: comps.append(dict( start=comp.start, end=comp.end, text=comp.text, type=comp.type, )) if completions: s = completions[0].start e = completions[0].end matches = [c.text for c in completions] else: s = cursor_pos e = cursor_pos matches = [] return {'matches': matches, 'cursor_end': e, 'cursor_start': s, 'metadata': {_EXPERIMENTAL_KEY_NAME: comps}, 'status': 'ok'} def do_inspect(self, code, cursor_pos, detail_level=0): name = token_at_cursor(code, cursor_pos) reply_content = {'status' : 'ok'} reply_content['data'] = {} reply_content['metadata'] = {} try: reply_content['data'].update( self.shell.object_inspect_mime( name, detail_level=detail_level ) ) if not self.shell.enable_html_pager: reply_content['data'].pop('text/html') reply_content['found'] = True except KeyError: reply_content['found'] = False return reply_content def do_history(self, hist_access_type, output, raw, session=0, start=0, stop=None, n=None, pattern=None, unique=False): if hist_access_type == 'tail': hist = self.shell.history_manager.get_tail(n, raw=raw, output=output, include_latest=True) elif hist_access_type == 'range': hist = self.shell.history_manager.get_range(session, start, stop, raw=raw, output=output) elif hist_access_type == 'search': hist = self.shell.history_manager.search( pattern, raw=raw, output=output, n=n, unique=unique) else: hist = [] return { 'status': 'ok', 'history' : list(hist), } def do_shutdown(self, restart): self.shell.exit_now = True return dict(status='ok', restart=restart) def do_is_complete(self, code): transformer_manager = getattr(self.shell, 'input_transformer_manager', None) if transformer_manager is None: # input_splitter attribute is deprecated transformer_manager = self.shell.input_splitter status, indent_spaces = transformer_manager.check_complete(code) r = {'status': status} if status == 'incomplete': r['indent'] = ' ' * indent_spaces return r def do_apply(self, content, bufs, msg_id, reply_metadata): from .serialize import serialize_object, unpack_apply_message shell = self.shell try: working = shell.user_ns prefix = "_"+str(msg_id).replace("-","")+"_" f,args,kwargs = unpack_apply_message(bufs, working, copy=False) fname = getattr(f, '__name__', 'f') fname = prefix+"f" argname = prefix+"args" kwargname = prefix+"kwargs" resultname = prefix+"result" ns = { fname : f, argname : args, kwargname : kwargs , resultname : None } # print ns working.update(ns) code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname) try: exec(code, shell.user_global_ns, shell.user_ns) result = working.get(resultname) finally: for key in ns: working.pop(key) result_buf = serialize_object(result, buffer_threshold=self.session.buffer_threshold, item_threshold=self.session.item_threshold, ) except BaseException as e: # invoke IPython traceback formatting shell.showtraceback() reply_content = { u'traceback': shell._last_traceback or [], u'ename': unicode_type(type(e).__name__), u'evalue': safe_unicode(e), } # FIXME: deprecated piece for ipyparallel (remove in 5.0): e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply') reply_content['engine_info'] = e_info self.send_response(self.iopub_socket, u'error', reply_content, ident=self._topic('error')) self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback'])) result_buf = [] reply_content['status'] = 'error' else: reply_content = {'status' : 'ok'} return reply_content, result_buf def do_clear(self): self.shell.reset(False) return dict(status='ok') # This exists only for backwards compatibility - use IPythonKernel instead class Kernel(IPythonKernel): def __init__(self, *args, **kwargs): import warnings warnings.warn('Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel', DeprecationWarning) super(Kernel, self).__init__(*args, **kwargs) ipykernel-5.2.0/ipykernel/jsonutil.py000066400000000000000000000145751363550014400177600ustar00rootroot00000000000000"""Utilities to manipulate JSON objects.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from binascii import b2a_base64 import math import re import types from datetime import datetime import numbers from ipython_genutils import py3compat from ipython_genutils.py3compat import unicode_type, iteritems from ipython_genutils.encoding import DEFAULT_ENCODING next_attr_name = '__next__' if py3compat.PY3 else 'next' #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- # timestamp formats ISO8601 = "%Y-%m-%dT%H:%M:%S.%f" ISO8601_PAT=re.compile(r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?Z?([\+\-]\d{2}:?\d{2})?$") # holy crap, strptime is not threadsafe. # Calling it once at import seems to help. datetime.strptime("1", "%d") #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- # constants for identifying png/jpeg data PNG = b'\x89PNG\r\n\x1a\n' # front of PNG base64-encoded PNG64 = b'iVBORw0KG' JPEG = b'\xff\xd8' # front of JPEG base64-encoded JPEG64 = b'/9' # constants for identifying gif data GIF_64 = b'R0lGODdh' GIF89_64 = b'R0lGODlh' # front of PDF base64-encoded PDF64 = b'JVBER' def encode_images(format_dict): """b64-encodes images in a displaypub format dict Perhaps this should be handled in json_clean itself? Parameters ---------- format_dict : dict A dictionary of display data keyed by mime-type Returns ------- format_dict : dict A copy of the same dictionary, but binary image data ('image/png', 'image/jpeg' or 'application/pdf') is base64-encoded. """ # no need for handling of ambiguous bytestrings on Python 3, # where bytes objects always represent binary data and thus # base64-encoded. if py3compat.PY3: return format_dict encoded = format_dict.copy() pngdata = format_dict.get('image/png') if isinstance(pngdata, bytes): # make sure we don't double-encode if not pngdata.startswith(PNG64): pngdata = b2a_base64(pngdata) encoded['image/png'] = pngdata.decode('ascii') jpegdata = format_dict.get('image/jpeg') if isinstance(jpegdata, bytes): # make sure we don't double-encode if not jpegdata.startswith(JPEG64): jpegdata = b2a_base64(jpegdata) encoded['image/jpeg'] = jpegdata.decode('ascii') gifdata = format_dict.get('image/gif') if isinstance(gifdata, bytes): # make sure we don't double-encode if not gifdata.startswith((GIF_64, GIF89_64)): gifdata = b2a_base64(gifdata) encoded['image/gif'] = gifdata.decode('ascii') pdfdata = format_dict.get('application/pdf') if isinstance(pdfdata, bytes): # make sure we don't double-encode if not pdfdata.startswith(PDF64): pdfdata = b2a_base64(pdfdata) encoded['application/pdf'] = pdfdata.decode('ascii') return encoded def json_clean(obj): """Clean an object to ensure it's safe to encode in JSON. Atomic, immutable objects are returned unmodified. Sets and tuples are converted to lists, lists are copied and dicts are also copied. Note: dicts whose keys could cause collisions upon encoding (such as a dict with both the number 1 and the string '1' as keys) will cause a ValueError to be raised. Parameters ---------- obj : any python object Returns ------- out : object A version of the input which will not cause an encoding error when encoded as JSON. Note that this function does not *encode* its inputs, it simply sanitizes it so that there will be no encoding errors later. """ # types that are 'atomic' and ok in json as-is. atomic_ok = (unicode_type, type(None)) # containers that we need to convert into lists container_to_list = (tuple, set, types.GeneratorType) # Since bools are a subtype of Integrals, which are a subtype of Reals, # we have to check them in that order. if isinstance(obj, bool): return obj if isinstance(obj, numbers.Integral): # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598) return int(obj) if isinstance(obj, numbers.Real): # cast out-of-range floats to their reprs if math.isnan(obj) or math.isinf(obj): return repr(obj) return float(obj) if isinstance(obj, atomic_ok): return obj if isinstance(obj, bytes): if py3compat.PY3: # unanmbiguous binary data is base64-encoded # (this probably should have happened upstream) return b2a_base64(obj).decode('ascii') else: # Python 2 bytestr is ambiguous, # needs special handling for possible binary bytestrings. # imperfect workaround: if ascii, assume text. # otherwise assume binary, base64-encode (py3 behavior). try: return obj.decode('ascii') except UnicodeDecodeError: return b2a_base64(obj).decode('ascii') if isinstance(obj, container_to_list) or ( hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)): obj = list(obj) if isinstance(obj, list): return [json_clean(x) for x in obj] if isinstance(obj, dict): # First, validate that the dict won't lose data in conversion due to # key collisions after stringification. This can happen with keys like # True and 'true' or 1 and '1', which collide in JSON. nkeys = len(obj) nkeys_collapsed = len(set(map(unicode_type, obj))) if nkeys != nkeys_collapsed: raise ValueError('dict cannot be safely converted to JSON: ' 'key collision would lead to dropped values') # If all OK, proceed by making the new dict that will be json-safe out = {} for k,v in iteritems(obj): out[unicode_type(k)] = json_clean(v) return out if isinstance(obj, datetime): return obj.strftime(ISO8601) # we don't understand it, it's probably an unserializable object raise ValueError("Can't clean for JSON: %r" % obj) ipykernel-5.2.0/ipykernel/kernelapp.py000066400000000000000000000561031363550014400200630ustar00rootroot00000000000000"""An Application for launching a kernel""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import atexit import os import sys import errno import signal import traceback import logging from tornado import ioloop import zmq from zmq.eventloop import ioloop as zmq_ioloop from zmq.eventloop.zmqstream import ZMQStream from IPython.core.application import ( BaseIPythonApplication, base_flags, base_aliases, catch_config_error ) from IPython.core.profiledir import ProfileDir from IPython.core.shellapp import ( InteractiveShellApp, shell_flags, shell_aliases ) from ipython_genutils.path import filefind, ensure_dir_exists from traitlets import ( Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, default ) from ipython_genutils.importstring import import_item from jupyter_core.paths import jupyter_runtime_dir from jupyter_client import write_connection_file from jupyter_client.connect import ConnectionFileMixin # local imports from .iostream import IOPubThread from .heartbeat import Heartbeat from .ipkernel import IPythonKernel from .parentpoller import ParentPollerUnix, ParentPollerWindows from jupyter_client.session import ( Session, session_flags, session_aliases, ) from .zmqshell import ZMQInteractiveShell #----------------------------------------------------------------------------- # Flags and Aliases #----------------------------------------------------------------------------- kernel_aliases = dict(base_aliases) kernel_aliases.update({ 'ip' : 'IPKernelApp.ip', 'hb' : 'IPKernelApp.hb_port', 'shell' : 'IPKernelApp.shell_port', 'iopub' : 'IPKernelApp.iopub_port', 'stdin' : 'IPKernelApp.stdin_port', 'control' : 'IPKernelApp.control_port', 'f' : 'IPKernelApp.connection_file', 'transport': 'IPKernelApp.transport', }) kernel_flags = dict(base_flags) kernel_flags.update({ 'no-stdout' : ( {'IPKernelApp' : {'no_stdout' : True}}, "redirect stdout to the null device"), 'no-stderr' : ( {'IPKernelApp' : {'no_stderr' : True}}, "redirect stderr to the null device"), 'pylab' : ( {'IPKernelApp' : {'pylab' : 'auto'}}, """Pre-load matplotlib and numpy for interactive use with the default matplotlib backend."""), }) # inherit flags&aliases for any IPython shell apps kernel_aliases.update(shell_aliases) kernel_flags.update(shell_flags) # inherit flags&aliases for Sessions kernel_aliases.update(session_aliases) kernel_flags.update(session_flags) _ctrl_c_message = """\ NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work. To exit, you will have to explicitly quit this process, by either sending "quit" from a client, or using Ctrl-\\ in UNIX-like environments. To read more about this, see https://github.com/ipython/ipython/issues/2049 """ #----------------------------------------------------------------------------- # Application class for starting an IPython Kernel #----------------------------------------------------------------------------- class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMixin): name='ipython-kernel' aliases = Dict(kernel_aliases) flags = Dict(kernel_flags) classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session] # the kernel class, as an importstring kernel_class = Type('ipykernel.ipkernel.IPythonKernel', klass='ipykernel.kernelbase.Kernel', help="""The Kernel subclass to be used. This should allow easy re-use of the IPKernelApp entry point to configure and launch kernels other than IPython's own. """).tag(config=True) kernel = Any() poller = Any() # don't restrict this even though current pollers are all Threads heartbeat = Instance(Heartbeat, allow_none=True) context = Any() shell_socket = Any() control_socket = Any() stdin_socket = Any() iopub_socket = Any() iopub_thread = Any() ports = Dict() subcommands = { 'install': ( 'ipykernel.kernelspec.InstallIPythonKernelSpecApp', 'Install the IPython kernel' ), } # connection info: connection_dir = Unicode() @default('connection_dir') def _default_connection_dir(self): return jupyter_runtime_dir() @property def abs_connection_file(self): if os.path.basename(self.connection_file) == self.connection_file: return os.path.join(self.connection_dir, self.connection_file) else: return self.connection_file # streams, etc. no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True) no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True) quiet = Bool(True, help="Only send stdout/stderr to output stream").tag(config=True) outstream_class = DottedObjectName('ipykernel.iostream.OutStream', help="The importstring for the OutStream factory").tag(config=True) displayhook_class = DottedObjectName('ipykernel.displayhook.ZMQDisplayHook', help="The importstring for the DisplayHook factory").tag(config=True) # polling parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0), help="""kill this process if its parent dies. On Windows, the argument specifies the HANDLE of the parent process, otherwise it is simply boolean. """).tag(config=True) interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0), help="""ONLY USED ON WINDOWS Interrupt this process when the parent is signaled. """).tag(config=True) def init_crash_handler(self): sys.excepthook = self.excepthook def excepthook(self, etype, evalue, tb): # write uncaught traceback to 'real' stderr, not zmq-forwarder traceback.print_exception(etype, evalue, tb, file=sys.__stderr__) def init_poller(self): if sys.platform == 'win32': if self.interrupt or self.parent_handle: self.poller = ParentPollerWindows(self.interrupt, self.parent_handle) elif self.parent_handle and self.parent_handle != 1: # PID 1 (init) is special and will never go away, # only be reassigned. # Parent polling doesn't work if ppid == 1 to start with. self.poller = ParentPollerUnix() def _try_bind_socket(self, s, port): iface = '%s://%s' % (self.transport, self.ip) if self.transport == 'tcp': if port <= 0: port = s.bind_to_random_port(iface) else: s.bind("tcp://%s:%i" % (self.ip, port)) elif self.transport == 'ipc': if port <= 0: port = 1 path = "%s-%i" % (self.ip, port) while os.path.exists(path): port = port + 1 path = "%s-%i" % (self.ip, port) else: path = "%s-%i" % (self.ip, port) s.bind("ipc://%s" % path) return port def _bind_socket(self, s, port): try: win_in_use = errno.WSAEADDRINUSE except AttributeError: win_in_use = None # Try up to 100 times to bind a port when in conflict to avoid # infinite attempts in bad setups max_attempts = 1 if port else 100 for attempt in range(max_attempts): try: return self._try_bind_socket(s, port) except zmq.ZMQError as ze: # Raise if we have any error not related to socket binding if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use: raise if attempt == max_attempts - 1: raise def write_connection_file(self): """write connection info to JSON file""" cf = self.abs_connection_file self.log.debug("Writing connection file: %s", cf) write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport, shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port, iopub_port=self.iopub_port, control_port=self.control_port) def cleanup_connection_file(self): cf = self.abs_connection_file self.log.debug("Cleaning up connection file: %s", cf) try: os.remove(cf) except (IOError, OSError): pass self.cleanup_ipc_files() def init_connection_file(self): if not self.connection_file: self.connection_file = "kernel-%s.json"%os.getpid() try: self.connection_file = filefind(self.connection_file, ['.', self.connection_dir]) except IOError: self.log.debug("Connection file not found: %s", self.connection_file) # This means I own it, and I'll create it in this directory: ensure_dir_exists(os.path.dirname(self.abs_connection_file), 0o700) # Also, I will clean it up: atexit.register(self.cleanup_connection_file) return try: self.load_connection_file() except Exception: self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) self.exit(1) def init_sockets(self): # Create a context, a session, and the kernel sockets. self.log.info("Starting the kernel at pid: %i", os.getpid()) assert self.context is None, "init_sockets cannot be called twice!" self.context = context = zmq.Context() atexit.register(self.close) self.shell_socket = context.socket(zmq.ROUTER) self.shell_socket.linger = 1000 self.shell_port = self._bind_socket(self.shell_socket, self.shell_port) self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port) self.stdin_socket = context.socket(zmq.ROUTER) self.stdin_socket.linger = 1000 self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port) self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port) self.control_socket = context.socket(zmq.ROUTER) self.control_socket.linger = 1000 self.control_port = self._bind_socket(self.control_socket, self.control_port) self.log.debug("control ROUTER Channel on port: %i" % self.control_port) if hasattr(zmq, 'ROUTER_HANDOVER'): # set router-handover to workaround zeromq reconnect problems # in certain rare circumstances # see ipython/ipykernel#270 and zeromq/libzmq#2892 self.shell_socket.router_handover = \ self.control_socket.router_handover = \ self.stdin_socket.router_handover = 1 self.init_iopub(context) def init_iopub(self, context): self.iopub_socket = context.socket(zmq.PUB) self.iopub_socket.linger = 1000 self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port) self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port) self.configure_tornado_logger() self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True) self.iopub_thread.start() # backward-compat: wrap iopub socket API in background thread self.iopub_socket = self.iopub_thread.background_socket def init_heartbeat(self): """start the heart beating""" # heartbeat doesn't share context, because it mustn't be blocked # by the GIL, which is accessed by libzmq when freeing zero-copy messages hb_ctx = zmq.Context() self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port)) self.hb_port = self.heartbeat.port self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port) self.heartbeat.start() def close(self): """Close zmq sockets in an orderly fashion""" # un-capture IO before we start closing channels self.reset_io() self.log.info("Cleaning up sockets") if self.heartbeat: self.log.debug("Closing heartbeat channel") self.heartbeat.context.term() if self.iopub_thread: self.log.debug("Closing iopub channel") self.iopub_thread.stop() self.iopub_thread.close() for channel in ('shell', 'control', 'stdin'): self.log.debug("Closing %s channel", channel) socket = getattr(self, channel + "_socket", None) if socket and not socket.closed: socket.close() self.log.debug("Terminating zmq context") self.context.term() self.log.debug("Terminated zmq context") def log_connection_info(self): """display connection info, and store ports""" basename = os.path.basename(self.connection_file) if basename == self.connection_file or \ os.path.dirname(self.connection_file) == self.connection_dir: # use shortname tail = basename else: tail = self.connection_file lines = [ "To connect another client to this kernel, use:", " --existing %s" % tail, ] # log connection info # info-level, so often not shown. # frontends should use the %connect_info magic # to see the connection info for line in lines: self.log.info(line) # also raw print to the terminal if no parent_handle (`ipython kernel`) # unless log-level is CRITICAL (--quiet) if not self.parent_handle and self.log_level < logging.CRITICAL: print(_ctrl_c_message, file=sys.__stdout__) for line in lines: print(line, file=sys.__stdout__) self.ports = dict(shell=self.shell_port, iopub=self.iopub_port, stdin=self.stdin_port, hb=self.hb_port, control=self.control_port) def init_blackhole(self): """redirects stdout/stderr to devnull if necessary""" if self.no_stdout or self.no_stderr: blackhole = open(os.devnull, 'w') if self.no_stdout: sys.stdout = sys.__stdout__ = blackhole if self.no_stderr: sys.stderr = sys.__stderr__ = blackhole def init_io(self): """Redirect input streams and set a display hook.""" if self.outstream_class: outstream_factory = import_item(str(self.outstream_class)) if sys.stdout is not None: sys.stdout.flush() e_stdout = None if self.quiet else sys.__stdout__ e_stderr = None if self.quiet else sys.__stderr__ sys.stdout = outstream_factory(self.session, self.iopub_thread, u'stdout', echo=e_stdout) if sys.stderr is not None: sys.stderr.flush() sys.stderr = outstream_factory(self.session, self.iopub_thread, u'stderr', echo=e_stderr) if self.displayhook_class: displayhook_factory = import_item(str(self.displayhook_class)) self.displayhook = displayhook_factory(self.session, self.iopub_socket) sys.displayhook = self.displayhook self.patch_io() def reset_io(self): """restore original io restores state after init_io """ sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.displayhook = sys.__displayhook__ def patch_io(self): """Patch important libraries that can't handle sys.stdout forwarding""" try: import faulthandler except ImportError: pass else: # Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible # updates to the upstream API and update accordingly (up-to-date as of Python 3.5): # https://docs.python.org/3/library/faulthandler.html#faulthandler.enable # change default file to __stderr__ from forwarded stderr faulthandler_enable = faulthandler.enable def enable(file=sys.__stderr__, all_threads=True, **kwargs): return faulthandler_enable(file=file, all_threads=all_threads, **kwargs) faulthandler.enable = enable if hasattr(faulthandler, 'register'): faulthandler_register = faulthandler.register def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs): return faulthandler_register(signum, file=file, all_threads=all_threads, chain=chain, **kwargs) faulthandler.register = register def init_signal(self): signal.signal(signal.SIGINT, signal.SIG_IGN) def init_kernel(self): """Create the Kernel object itself""" shell_stream = ZMQStream(self.shell_socket) control_stream = ZMQStream(self.control_socket) kernel_factory = self.kernel_class.instance kernel = kernel_factory(parent=self, session=self.session, control_stream=control_stream, shell_streams=[shell_stream, control_stream], iopub_thread=self.iopub_thread, iopub_socket=self.iopub_socket, stdin_socket=self.stdin_socket, log=self.log, profile_dir=self.profile_dir, user_ns=self.user_ns, ) kernel.record_ports({ name + '_port': port for name, port in self.ports.items() }) self.kernel = kernel # Allow the displayhook to get the execution count self.displayhook.get_execution_count = lambda: kernel.execution_count def init_gui_pylab(self): """Enable GUI event loop integration, taking pylab into account.""" # Register inline backend as default # this is higher priority than matplotlibrc, # but lower priority than anything else (mpl.use() for instance). # This only affects matplotlib >= 1.5 if not os.environ.get('MPLBACKEND'): os.environ['MPLBACKEND'] = 'module://ipykernel.pylab.backend_inline' # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab` # to ensure that any exception is printed straight to stderr. # Normally _showtraceback associates the reply with an execution, # which means frontends will never draw it, as this exception # is not associated with any execute request. shell = self.shell _showtraceback = shell._showtraceback try: # replace error-sending traceback with stderr def print_tb(etype, evalue, stb): print ("GUI event loop or pylab initialization failed", file=sys.stderr) print (shell.InteractiveTB.stb2text(stb), file=sys.stderr) shell._showtraceback = print_tb InteractiveShellApp.init_gui_pylab(self) finally: shell._showtraceback = _showtraceback def init_shell(self): self.shell = getattr(self.kernel, 'shell', None) if self.shell: self.shell.configurables.append(self) def configure_tornado_logger(self): """ Configure the tornado logging.Logger. Must set up the tornado logger or else tornado will call basicConfig for the root logger which makes the root logger go to the real sys.stderr instead of the capture streams. This function mimics the setup of logging.basicConfig. """ logger = logging.getLogger('tornado') handler = logging.StreamHandler() formatter = logging.Formatter(logging.BASIC_FORMAT) handler.setFormatter(formatter) logger.addHandler(handler) def _init_asyncio_patch(self): """set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if sys.platform.startswith("win") and sys.version_info >= (3, 8): import asyncio try: from asyncio import ( WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) @catch_config_error def initialize(self, argv=None): self._init_asyncio_patch() super(IPKernelApp, self).initialize(argv) if self.subapp is not None: return self.init_blackhole() self.init_connection_file() self.init_poller() self.init_sockets() self.init_heartbeat() # writing/displaying connection info must be *after* init_sockets/heartbeat self.write_connection_file() # Log connection info after writing connection file, so that the connection # file is definitely available at the time someone reads the log. self.log_connection_info() self.init_io() try: self.init_signal() except: # Catch exception when initializing signal fails, eg when running the # kernel on a separate thread if self.log_level < logging.CRITICAL: self.log.error("Unable to initialize signal:", exc_info=True) self.init_kernel() # shell init steps self.init_path() self.init_shell() if self.shell: self.init_gui_pylab() self.init_extensions() self.init_code() # flush stdout/stderr, so that anything written to these streams during # initialization do not get associated with the first execution request sys.stdout.flush() sys.stderr.flush() def start(self): if self.subapp is not None: return self.subapp.start() if self.poller is not None: self.poller.start() self.kernel.start() self.io_loop = ioloop.IOLoop.current() try: self.io_loop.start() except KeyboardInterrupt: pass launch_new_instance = IPKernelApp.launch_instance def main(): """Run an IPKernel as an application""" app = IPKernelApp.instance() app.initialize() app.start() if __name__ == '__main__': main() ipykernel-5.2.0/ipykernel/kernelbase.py000066400000000000000000000773061363550014400202250ustar00rootroot00000000000000"""Base class for a kernel that talks to frontends over 0MQ.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function from datetime import datetime from functools import partial import itertools import logging from signal import signal, default_int_handler, SIGINT import sys import time import uuid try: # jupyter_client >= 5, use tz-aware now from jupyter_client.session import utcnow as now except ImportError: # jupyter_client < 5, use local now() now = datetime.now from tornado import ioloop from tornado import gen from tornado.queues import PriorityQueue, QueueEmpty import zmq from zmq.eventloop.zmqstream import ZMQStream from traitlets.config.configurable import SingletonConfigurable from IPython.core.error import StdinNotImplementedError from ipython_genutils import py3compat from ipython_genutils.py3compat import unicode_type, string_types from ipykernel.jsonutil import json_clean from traitlets import ( Any, Instance, Float, Dict, List, Set, Integer, Unicode, Bool, observe, default ) from jupyter_client.session import Session from ._version import kernel_protocol_version CONTROL_PRIORITY = 1 SHELL_PRIORITY = 10 ABORT_PRIORITY = 20 class Kernel(SingletonConfigurable): #--------------------------------------------------------------------------- # Kernel interface #--------------------------------------------------------------------------- # attribute to override with a GUI eventloop = Any(None) @observe('eventloop') def _update_eventloop(self, change): """schedule call to eventloop from IOLoop""" loop = ioloop.IOLoop.current() if change.new is not None: loop.add_callback(self.enter_eventloop) session = Instance(Session, allow_none=True) profile_dir = Instance('IPython.core.profiledir.ProfileDir', allow_none=True) shell_streams = List() control_stream = Instance(ZMQStream, allow_none=True) iopub_socket = Any() iopub_thread = Any() stdin_socket = Any() log = Instance(logging.Logger, allow_none=True) # identities: int_id = Integer(-1) ident = Unicode() @default('ident') def _default_ident(self): return unicode_type(uuid.uuid4()) # This should be overridden by wrapper kernels that implement any real # language. language_info = {} # any links that should go in the help menu help_links = List() # Private interface _darwin_app_nap = Bool(True, help="""Whether to use appnope for compatibility with OS X App Nap. Only affects OS X >= 10.9. """ ).tag(config=True) # track associations with current request _allow_stdin = Bool(False) _parent_header = Dict() _parent_ident = Any(b'') # Time to sleep after flushing the stdout/err buffers in each execute # cycle. While this introduces a hard limit on the minimal latency of the # execute cycle, it helps prevent output synchronization problems for # clients. # Units are in seconds. The minimum zmq latency on local host is probably # ~150 microseconds, set this to 500us for now. We may need to increase it # a little if it's not enough after more interactive testing. _execute_sleep = Float(0.0005).tag(config=True) # Frequency of the kernel's event loop. # Units are in seconds, kernel subclasses for GUI toolkits may need to # adapt to milliseconds. _poll_interval = Float(0.01).tag(config=True) stop_on_error_timeout = Float( 0.1, config=True, help="""time (in seconds) to wait for messages to arrive when aborting queued requests after an error. Requests that arrive within this window after an error will be cancelled. Increase in the event of unusually slow network causing significant delays, which can manifest as e.g. "Run all" in a notebook aborting some, but not all, messages after an error. """ ) # If the shutdown was requested over the network, we leave here the # necessary reply message so it can be sent by our registered atexit # handler. This ensures that the reply is only sent to clients truly at # the end of our shutdown process (which happens after the underlying # IPython shell's own shutdown). _shutdown_message = None # This is a dict of port number that the kernel is listening on. It is set # by record_ports and used by connect_request. _recorded_ports = Dict() # set of aborted msg_ids aborted = Set() # Track execution count here. For IPython, we override this to use the # execution count we store in the shell. execution_count = 0 msg_types = [ 'execute_request', 'complete_request', 'inspect_request', 'history_request', 'comm_info_request', 'kernel_info_request', 'connect_request', 'shutdown_request', 'is_complete_request', # deprecated: 'apply_request', ] # add deprecated ipyparallel control messages control_msg_types = msg_types + ['clear_request', 'abort_request'] def __init__(self, **kwargs): super(Kernel, self).__init__(**kwargs) # Build dict of handlers for message types self.shell_handlers = {} for msg_type in self.msg_types: self.shell_handlers[msg_type] = getattr(self, msg_type) self.control_handlers = {} for msg_type in self.control_msg_types: self.control_handlers[msg_type] = getattr(self, msg_type) @gen.coroutine def dispatch_control(self, msg): """dispatch control requests""" idents, msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.deserialize(msg, content=True, copy=False) except: self.log.error("Invalid Control Message", exc_info=True) return self.log.debug("Control received: %s", msg) # Set the parent message for side effects. self.set_parent(idents, msg) self._publish_status(u'busy') if self._aborting: self._send_abort_reply(self.control_stream, msg, idents) self._publish_status(u'idle') return header = msg['header'] msg_type = header['msg_type'] handler = self.control_handlers.get(msg_type, None) if handler is None: self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type) else: try: yield gen.maybe_future(handler(self.control_stream, idents, msg)) except Exception: self.log.error("Exception in control handler:", exc_info=True) sys.stdout.flush() sys.stderr.flush() self._publish_status(u'idle') # flush to ensure reply is sent self.control_stream.flush(zmq.POLLOUT) def should_handle(self, stream, msg, idents): """Check whether a shell-channel message should be handled Allows subclasses to prevent handling of certain messages (e.g. aborted requests). """ msg_id = msg['header']['msg_id'] if msg_id in self.aborted: msg_type = msg['header']['msg_type'] # is it safe to assume a msg_id will not be resubmitted? self.aborted.remove(msg_id) self._send_abort_reply(stream, msg, idents) return False return True @gen.coroutine def dispatch_shell(self, stream, msg): """dispatch shell requests""" idents, msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.deserialize(msg, content=True, copy=False) except: self.log.error("Invalid Message", exc_info=True) return # Set the parent message for side effects. self.set_parent(idents, msg) self._publish_status(u'busy') if self._aborting: self._send_abort_reply(stream, msg, idents) self._publish_status(u'idle') # flush to ensure reply is sent before # handling the next request stream.flush(zmq.POLLOUT) return msg_type = msg['header']['msg_type'] # Print some info about this message and leave a '--->' marker, so it's # easier to trace visually the message chain when debugging. Each # handler prints its message at the end. self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type) self.log.debug(' Content: %s\n --->\n ', msg['content']) if not self.should_handle(stream, msg, idents): return handler = self.shell_handlers.get(msg_type, None) if handler is None: self.log.warning("Unknown message type: %r", msg_type) else: self.log.debug("%s: %s", msg_type, msg) try: self.pre_handler_hook() except Exception: self.log.debug("Unable to signal in pre_handler_hook:", exc_info=True) try: yield gen.maybe_future(handler(stream, idents, msg)) except Exception: self.log.error("Exception in message handler:", exc_info=True) finally: try: self.post_handler_hook() except Exception: self.log.debug("Unable to signal in post_handler_hook:", exc_info=True) sys.stdout.flush() sys.stderr.flush() self._publish_status(u'idle') # flush to ensure reply is sent before # handling the next request stream.flush(zmq.POLLOUT) def pre_handler_hook(self): """Hook to execute before calling message handler""" # ensure default_int_handler during handler call self.saved_sigint_handler = signal(SIGINT, default_int_handler) def post_handler_hook(self): """Hook to execute after calling message handler""" signal(SIGINT, self.saved_sigint_handler) def enter_eventloop(self): """enter eventloop""" self.log.info("Entering eventloop %s", self.eventloop) # record handle, so we can check when this changes eventloop = self.eventloop if eventloop is None: self.log.info("Exiting as there is no eventloop") return def advance_eventloop(): # check if eventloop changed: if self.eventloop is not eventloop: self.log.info("exiting eventloop %s", eventloop) return if self.msg_queue.qsize(): self.log.debug("Delaying eventloop due to waiting messages") # still messages to process, make the eventloop wait schedule_next() return self.log.debug("Advancing eventloop %s", eventloop) try: eventloop(self) except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel self.log.error("KeyboardInterrupt caught in kernel") pass if self.eventloop is eventloop: # schedule advance again schedule_next() def schedule_next(): """Schedule the next advance of the eventloop""" # flush the eventloop every so often, # giving us a chance to handle messages in the meantime self.log.debug("Scheduling eventloop advance") self.io_loop.call_later(1, advance_eventloop) # begin polling the eventloop schedule_next() @gen.coroutine def do_one_iteration(self): """Process a single shell message Any pending control messages will be flushed as well .. versionchanged:: 5 This is now a coroutine """ # flush messages off of shell streams into the message queue for stream in self.shell_streams: stream.flush() # process all messages higher priority than shell (control), # and at most one shell message per iteration priority = 0 while priority is not None and priority < SHELL_PRIORITY: priority = yield self.process_one(wait=False) @gen.coroutine def process_one(self, wait=True): """Process one request Returns priority of the message handled. Returns None if no message was handled. """ if wait: priority, t, dispatch, args = yield self.msg_queue.get() else: try: priority, t, dispatch, args = self.msg_queue.get_nowait() except QueueEmpty: return None yield gen.maybe_future(dispatch(*args)) @gen.coroutine def dispatch_queue(self): """Coroutine to preserve order of message handling Ensures that only one message is processing at a time, even when the handler is async """ while True: # ensure control stream is flushed before processing shell messages if self.control_stream: self.control_stream.flush() # receive the next message and handle it try: yield self.process_one() except Exception: self.log.exception("Error in message handler") _message_counter = Any( help="""Monotonic counter of messages Ensures messages of the same priority are handled in arrival order. """, ) @default('_message_counter') def _message_counter_default(self): return itertools.count() def schedule_dispatch(self, priority, dispatch, *args): """schedule a message for dispatch""" idx = next(self._message_counter) self.msg_queue.put_nowait( ( priority, idx, dispatch, args, ) ) # ensure the eventloop wakes up self.io_loop.add_callback(lambda: None) def start(self): """register dispatchers for streams""" self.io_loop = ioloop.IOLoop.current() self.msg_queue = PriorityQueue() self.io_loop.add_callback(self.dispatch_queue) if self.control_stream: self.control_stream.on_recv( partial( self.schedule_dispatch, CONTROL_PRIORITY, self.dispatch_control, ), copy=False, ) for s in self.shell_streams: if s is self.control_stream: continue s.on_recv( partial( self.schedule_dispatch, SHELL_PRIORITY, self.dispatch_shell, s, ), copy=False, ) # publish idle status self._publish_status('starting') def record_ports(self, ports): """Record the ports that this kernel is using. The creator of the Kernel instance must call this methods if they want the :meth:`connect_request` method to return the port numbers. """ self._recorded_ports = ports #--------------------------------------------------------------------------- # Kernel request handlers #--------------------------------------------------------------------------- def _publish_execute_input(self, code, parent, execution_count): """Publish the code request on the iopub stream.""" self.session.send(self.iopub_socket, u'execute_input', {u'code':code, u'execution_count': execution_count}, parent=parent, ident=self._topic('execute_input') ) def _publish_status(self, status, parent=None): """send status (busy/idle) on IOPub""" self.session.send(self.iopub_socket, u'status', {u'execution_state': status}, parent=parent or self._parent_header, ident=self._topic('status'), ) def set_parent(self, ident, parent): """Set the current parent_header Side effects (IOPub messages) and replies are associated with the request that caused them via the parent_header. The parent identity is used to route input_request messages on the stdin channel. """ self._parent_ident = ident self._parent_header = parent def send_response(self, stream, msg_or_type, content=None, ident=None, buffers=None, track=False, header=None, metadata=None): """Send a response to the message we're currently processing. This accepts all the parameters of :meth:`jupyter_client.session.Session.send` except ``parent``. This relies on :meth:`set_parent` having been called for the current message. """ return self.session.send(stream, msg_or_type, content, self._parent_header, ident, buffers, track, header, metadata) def init_metadata(self, parent): """Initialize metadata. Run at the beginning of execution requests. """ # FIXME: `started` is part of ipyparallel # Remove for ipykernel 5.0 return { 'started': now(), } def finish_metadata(self, parent, metadata, reply_content): """Finish populating metadata. Run after completing an execution request. """ return metadata @gen.coroutine def execute_request(self, stream, ident, parent): """handle an execute_request""" try: content = parent[u'content'] code = py3compat.cast_unicode_py2(content[u'code']) silent = content[u'silent'] store_history = content.get(u'store_history', not silent) user_expressions = content.get('user_expressions', {}) allow_stdin = content.get('allow_stdin', False) except: self.log.error("Got bad msg: ") self.log.error("%s", parent) return stop_on_error = content.get('stop_on_error', True) metadata = self.init_metadata(parent) # Re-broadcast our input for the benefit of listening clients, and # start computing output if not silent: self.execution_count += 1 self._publish_execute_input(code, parent, self.execution_count) reply_content = yield gen.maybe_future( self.do_execute( code, silent, store_history, user_expressions, allow_stdin, ) ) # Flush output before sending the reply. sys.stdout.flush() sys.stderr.flush() # FIXME: on rare occasions, the flush doesn't seem to make it to the # clients... This seems to mitigate the problem, but we definitely need # to better understand what's going on. if self._execute_sleep: time.sleep(self._execute_sleep) # Send the reply. reply_content = json_clean(reply_content) metadata = self.finish_metadata(parent, metadata, reply_content) reply_msg = self.session.send(stream, u'execute_reply', reply_content, parent, metadata=metadata, ident=ident) self.log.debug("%s", reply_msg) if not silent and reply_msg['content']['status'] == u'error' and stop_on_error: yield self._abort_queues() def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): """Execute user code. Must be overridden by subclasses. """ raise NotImplementedError @gen.coroutine def complete_request(self, stream, ident, parent): content = parent['content'] code = content['code'] cursor_pos = content['cursor_pos'] matches = yield gen.maybe_future(self.do_complete(code, cursor_pos)) matches = json_clean(matches) completion_msg = self.session.send(stream, 'complete_reply', matches, parent, ident) def do_complete(self, code, cursor_pos): """Override in subclasses to find completions. """ return {'matches' : [], 'cursor_end' : cursor_pos, 'cursor_start' : cursor_pos, 'metadata' : {}, 'status' : 'ok'} @gen.coroutine def inspect_request(self, stream, ident, parent): content = parent['content'] reply_content = yield gen.maybe_future( self.do_inspect( content['code'], content['cursor_pos'], content.get('detail_level', 0), ) ) # Before we send this object over, we scrub it for JSON usage reply_content = json_clean(reply_content) msg = self.session.send(stream, 'inspect_reply', reply_content, parent, ident) self.log.debug("%s", msg) def do_inspect(self, code, cursor_pos, detail_level=0): """Override in subclasses to allow introspection. """ return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': False} @gen.coroutine def history_request(self, stream, ident, parent): content = parent['content'] reply_content = yield gen.maybe_future(self.do_history(**content)) reply_content = json_clean(reply_content) msg = self.session.send(stream, 'history_reply', reply_content, parent, ident) self.log.debug("%s", msg) def do_history(self, hist_access_type, output, raw, session=None, start=None, stop=None, n=None, pattern=None, unique=False): """Override in subclasses to access history. """ return {'status': 'ok', 'history': []} def connect_request(self, stream, ident, parent): if self._recorded_ports is not None: content = self._recorded_ports.copy() else: content = {} content['status'] = 'ok' msg = self.session.send(stream, 'connect_reply', content, parent, ident) self.log.debug("%s", msg) @property def kernel_info(self): return { 'protocol_version': kernel_protocol_version, 'implementation': self.implementation, 'implementation_version': self.implementation_version, 'language_info': self.language_info, 'banner': self.banner, 'help_links': self.help_links, } def kernel_info_request(self, stream, ident, parent): content = {'status': 'ok'} content.update(self.kernel_info) msg = self.session.send(stream, 'kernel_info_reply', content, parent, ident) self.log.debug("%s", msg) def comm_info_request(self, stream, ident, parent): content = parent['content'] target_name = content.get('target_name', None) # Should this be moved to ipkernel? if hasattr(self, 'comm_manager'): comms = { k: dict(target_name=v.target_name) for (k, v) in self.comm_manager.comms.items() if v.target_name == target_name or target_name is None } else: comms = {} reply_content = dict(comms=comms, status='ok') msg = self.session.send(stream, 'comm_info_reply', reply_content, parent, ident) self.log.debug("%s", msg) @gen.coroutine def shutdown_request(self, stream, ident, parent): content = yield gen.maybe_future(self.do_shutdown(parent['content']['restart'])) self.session.send(stream, u'shutdown_reply', content, parent, ident=ident) # same content, but different msg_id for broadcasting on IOPub self._shutdown_message = self.session.msg(u'shutdown_reply', content, parent ) self._at_shutdown() # call sys.exit after a short delay loop = ioloop.IOLoop.current() loop.add_timeout(time.time()+0.1, loop.stop) def do_shutdown(self, restart): """Override in subclasses to do things when the frontend shuts down the kernel. """ return {'status': 'ok', 'restart': restart} @gen.coroutine def is_complete_request(self, stream, ident, parent): content = parent['content'] code = content['code'] reply_content = yield gen.maybe_future(self.do_is_complete(code)) reply_content = json_clean(reply_content) reply_msg = self.session.send(stream, 'is_complete_reply', reply_content, parent, ident) self.log.debug("%s", reply_msg) def do_is_complete(self, code): """Override in subclasses to find completions. """ return {'status' : 'unknown', } #--------------------------------------------------------------------------- # Engine methods (DEPRECATED) #--------------------------------------------------------------------------- def apply_request(self, stream, ident, parent): self.log.warning("apply_request is deprecated in kernel_base, moving to ipyparallel.") try: content = parent[u'content'] bufs = parent[u'buffers'] msg_id = parent['header']['msg_id'] except: self.log.error("Got bad msg: %s", parent, exc_info=True) return md = self.init_metadata(parent) reply_content, result_buf = self.do_apply(content, bufs, msg_id, md) # flush i/o sys.stdout.flush() sys.stderr.flush() md = self.finish_metadata(parent, md, reply_content) self.session.send(stream, u'apply_reply', reply_content, parent=parent, ident=ident,buffers=result_buf, metadata=md) def do_apply(self, content, bufs, msg_id, reply_metadata): """DEPRECATED""" raise NotImplementedError #--------------------------------------------------------------------------- # Control messages (DEPRECATED) #--------------------------------------------------------------------------- def abort_request(self, stream, ident, parent): """abort a specific msg by id""" self.log.warning("abort_request is deprecated in kernel_base. It is only part of IPython parallel") msg_ids = parent['content'].get('msg_ids', None) if isinstance(msg_ids, string_types): msg_ids = [msg_ids] if not msg_ids: self._abort_queues() for mid in msg_ids: self.aborted.add(str(mid)) content = dict(status='ok') reply_msg = self.session.send(stream, 'abort_reply', content=content, parent=parent, ident=ident) self.log.debug("%s", reply_msg) def clear_request(self, stream, idents, parent): """Clear our namespace.""" self.log.warning("clear_request is deprecated in kernel_base. It is only part of IPython parallel") content = self.do_clear() self.session.send(stream, 'clear_reply', ident=idents, parent=parent, content = content) def do_clear(self): """DEPRECATED since 4.0.3""" raise NotImplementedError #--------------------------------------------------------------------------- # Protected interface #--------------------------------------------------------------------------- def _topic(self, topic): """prefixed topic for IOPub messages""" base = "kernel.%s" % self.ident return py3compat.cast_bytes("%s.%s" % (base, topic)) _aborting = Bool(False) @gen.coroutine def _abort_queues(self): for stream in self.shell_streams: stream.flush() self._aborting = True self.schedule_dispatch( ABORT_PRIORITY, self._dispatch_abort, ) @gen.coroutine def _dispatch_abort(self): self.log.info("Finishing abort") yield gen.sleep(self.stop_on_error_timeout) self._aborting = False def _send_abort_reply(self, stream, msg, idents): """Send a reply to an aborted request""" self.log.info("Aborting:") self.log.info("%s", msg) reply_type = msg['header']['msg_type'].rsplit('_', 1)[0] + '_reply' status = {'status': 'aborted'} md = {'engine': self.ident} md.update(status) self.session.send( stream, reply_type, metadata=md, content=status, parent=msg, ident=idents, ) def _no_raw_input(self): """Raise StdinNotImplentedError if active frontend doesn't support stdin.""" raise StdinNotImplementedError("raw_input was called, but this " "frontend does not support stdin.") def getpass(self, prompt='', stream=None): """Forward getpass to frontends Raises ------ StdinNotImplentedError if active frontend doesn't support stdin. """ if not self._allow_stdin: raise StdinNotImplementedError( "getpass was called, but this frontend does not support input requests." ) if stream is not None: import warnings warnings.warn("The `stream` parameter of `getpass.getpass` will have no effect when using ipykernel", UserWarning, stacklevel=2) return self._input_request(prompt, self._parent_ident, self._parent_header, password=True, ) def raw_input(self, prompt=''): """Forward raw_input to frontends Raises ------ StdinNotImplentedError if active frontend doesn't support stdin. """ if not self._allow_stdin: raise StdinNotImplementedError( "raw_input was called, but this frontend does not support input requests." ) return self._input_request(str(prompt), self._parent_ident, self._parent_header, password=False, ) def _input_request(self, prompt, ident, parent, password=False): # Flush output before making the request. sys.stderr.flush() sys.stdout.flush() # flush the stdin socket, to purge stale replies while True: try: self.stdin_socket.recv_multipart(zmq.NOBLOCK) except zmq.ZMQError as e: if e.errno == zmq.EAGAIN: break else: raise # Send the input request. content = json_clean(dict(prompt=prompt, password=password)) self.session.send(self.stdin_socket, u'input_request', content, parent, ident=ident) # Await a response. while True: try: ident, reply = self.session.recv(self.stdin_socket, 0) except Exception: self.log.warning("Invalid Message:", exc_info=True) except KeyboardInterrupt: # re-raise KeyboardInterrupt, to truncate traceback raise KeyboardInterrupt("Interrupted by user") from None else: break try: value = py3compat.unicode_to_str(reply['content']['value']) except: self.log.error("Bad input_reply: %s", parent) value = '' if value == '\x04': # EOF raise EOFError return value def _at_shutdown(self): """Actions taken at shutdown by the kernel, called by python's atexit. """ if self._shutdown_message is not None: self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown')) self.log.debug("%s", self._shutdown_message) [ s.flush(zmq.POLLOUT) for s in self.shell_streams ] ipykernel-5.2.0/ipykernel/kernelspec.py000066400000000000000000000147221363550014400202360ustar00rootroot00000000000000"""The IPython kernel spec for Jupyter""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import errno import json import os import shutil import sys import tempfile from jupyter_client.kernelspec import KernelSpecManager pjoin = os.path.join KERNEL_NAME = 'python%i' % sys.version_info[0] # path to kernelspec resources RESOURCES = pjoin(os.path.dirname(__file__), 'resources') def make_ipkernel_cmd(mod='ipykernel_launcher', executable=None, extra_arguments=None, **kw): """Build Popen command list for launching an IPython kernel. Parameters ---------- mod : str, optional (default 'ipykernel') A string of an IPython module whose __main__ starts an IPython kernel executable : str, optional (default sys.executable) The Python executable to use for the kernel process. extra_arguments : list, optional A list of extra arguments to pass when executing the launch code. Returns ------- A Popen command list """ if executable is None: executable = sys.executable extra_arguments = extra_arguments or [] arguments = [executable, '-m', mod, '-f', '{connection_file}'] arguments.extend(extra_arguments) return arguments def get_kernel_dict(extra_arguments=None): """Construct dict for kernel.json""" return { 'argv': make_ipkernel_cmd(extra_arguments=extra_arguments), 'display_name': 'Python %i' % sys.version_info[0], 'language': 'python', } def write_kernel_spec(path=None, overrides=None, extra_arguments=None): """Write a kernel spec directory to `path` If `path` is not specified, a temporary directory is created. If `overrides` is given, the kernelspec JSON is updated before writing. The path to the kernelspec is always returned. """ if path is None: path = os.path.join(tempfile.mkdtemp(suffix='_kernels'), KERNEL_NAME) # stage resources shutil.copytree(RESOURCES, path) # write kernel.json kernel_dict = get_kernel_dict(extra_arguments) if overrides: kernel_dict.update(overrides) with open(pjoin(path, 'kernel.json'), 'w') as f: json.dump(kernel_dict, f, indent=1) return path def install(kernel_spec_manager=None, user=False, kernel_name=KERNEL_NAME, display_name=None, prefix=None, profile=None): """Install the IPython kernelspec for Jupyter Parameters ---------- kernel_spec_manager: KernelSpecManager [optional] A KernelSpecManager to use for installation. If none provided, a default instance will be created. user: bool [default: False] Whether to do a user-only install, or system-wide. kernel_name: str, optional Specify a name for the kernelspec. This is needed for having multiple IPython kernels for different environments. display_name: str, optional Specify the display name for the kernelspec profile: str, optional Specify a custom profile to be loaded by the kernel. prefix: str, optional Specify an install prefix for the kernelspec. This is needed to install into a non-default location, such as a conda/virtual-env. Returns ------- The path where the kernelspec was installed. """ if kernel_spec_manager is None: kernel_spec_manager = KernelSpecManager() if (kernel_name != KERNEL_NAME) and (display_name is None): # kernel_name is specified and display_name is not # default display_name to kernel_name display_name = kernel_name overrides = {} if display_name: overrides["display_name"] = display_name if profile: extra_arguments = ["--profile", profile] if not display_name: # add the profile to the default display name overrides["display_name"] = 'Python %i [profile=%s]' % (sys.version_info[0], profile) else: extra_arguments = None path = write_kernel_spec(overrides=overrides, extra_arguments=extra_arguments) dest = kernel_spec_manager.install_kernel_spec( path, kernel_name=kernel_name, user=user, prefix=prefix) # cleanup afterward shutil.rmtree(path) return dest # Entrypoint from traitlets.config import Application class InstallIPythonKernelSpecApp(Application): """Dummy app wrapping argparse""" name = 'ipython-kernel-install' def initialize(self, argv=None): if argv is None: argv = sys.argv[1:] self.argv = argv def start(self): import argparse parser = argparse.ArgumentParser(prog=self.name, description="Install the IPython kernel spec.") parser.add_argument('--user', action='store_true', help="Install for the current user instead of system-wide") parser.add_argument('--name', type=str, default=KERNEL_NAME, help="Specify a name for the kernelspec." " This is needed to have multiple IPython kernels at the same time.") parser.add_argument('--display-name', type=str, help="Specify the display name for the kernelspec." " This is helpful when you have multiple IPython kernels.") parser.add_argument('--profile', type=str, help="Specify an IPython profile to load. " "This can be used to create custom versions of the kernel.") parser.add_argument('--prefix', type=str, help="Specify an install prefix for the kernelspec." " This is needed to install into a non-default location, such as a conda/virtual-env.") parser.add_argument('--sys-prefix', action='store_const', const=sys.prefix, dest='prefix', help="Install to Python's sys.prefix." " Shorthand for --prefix='%s'. For use in conda/virtual-envs." % sys.prefix) opts = parser.parse_args(self.argv) try: dest = install(user=opts.user, kernel_name=opts.name, profile=opts.profile, prefix=opts.prefix, display_name=opts.display_name) except OSError as e: if e.errno == errno.EACCES: print(e, file=sys.stderr) if opts.user: print("Perhaps you want `sudo` or `--user`?", file=sys.stderr) self.exit(1) raise print("Installed kernelspec %s in %s" % (opts.name, dest)) if __name__ == '__main__': InstallIPythonKernelSpecApp.launch_instance() ipykernel-5.2.0/ipykernel/log.py000066400000000000000000000013761363550014400166650ustar00rootroot00000000000000from logging import INFO, DEBUG, WARN, ERROR, FATAL from zmq.log.handlers import PUBHandler import warnings warnings.warn("ipykernel.log is deprecated. It has moved to ipyparallel.engine.log", DeprecationWarning) class EnginePUBHandler(PUBHandler): """A simple PUBHandler subclass that sets root_topic""" engine=None def __init__(self, engine, *args, **kwargs): PUBHandler.__init__(self,*args, **kwargs) self.engine = engine @property def root_topic(self): """this is a property, in case the handler is created before the engine gets registered with an id""" if isinstance(getattr(self.engine, 'id', None), int): return "engine.%i"%self.engine.id else: return "engine" ipykernel-5.2.0/ipykernel/parentpoller.py000066400000000000000000000101171363550014400206040ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. try: import ctypes except: ctypes = None import os import platform import signal import time try: from _thread import interrupt_main # Py 3 except ImportError: from thread import interrupt_main # Py 2 from threading import Thread from traitlets.log import get_logger import warnings class ParentPollerUnix(Thread): """ A Unix-specific daemon thread that terminates the program immediately when the parent process no longer exists. """ def __init__(self): super(ParentPollerUnix, self).__init__() self.daemon = True def run(self): # We cannot use os.waitpid because it works only for child processes. from errno import EINTR while True: try: if os.getppid() == 1: get_logger().warning("Parent appears to have exited, shutting down.") os._exit(1) time.sleep(1.0) except OSError as e: if e.errno == EINTR: continue raise class ParentPollerWindows(Thread): """ A Windows-specific daemon thread that listens for a special event that signals an interrupt and, optionally, terminates the program immediately when the parent process no longer exists. """ def __init__(self, interrupt_handle=None, parent_handle=None): """ Create the poller. At least one of the optional parameters must be provided. Parameters ---------- interrupt_handle : HANDLE (int), optional If provided, the program will generate a Ctrl+C event when this handle is signaled. parent_handle : HANDLE (int), optional If provided, the program will terminate immediately when this handle is signaled. """ assert(interrupt_handle or parent_handle) super(ParentPollerWindows, self).__init__() if ctypes is None: raise ImportError("ParentPollerWindows requires ctypes") self.daemon = True self.interrupt_handle = interrupt_handle self.parent_handle = parent_handle def run(self): """ Run the poll loop. This method never returns. """ try: from _winapi import WAIT_OBJECT_0, INFINITE except ImportError: from _subprocess import WAIT_OBJECT_0, INFINITE # Build the list of handle to listen on. handles = [] if self.interrupt_handle: handles.append(self.interrupt_handle) if self.parent_handle: handles.append(self.parent_handle) arch = platform.architecture()[0] c_int = ctypes.c_int64 if arch.startswith('64') else ctypes.c_int # Listen forever. while True: result = ctypes.windll.kernel32.WaitForMultipleObjects( len(handles), # nCount (c_int * len(handles))(*handles), # lpHandles False, # bWaitAll INFINITE) # dwMilliseconds if WAIT_OBJECT_0 <= result < len(handles): handle = handles[result - WAIT_OBJECT_0] if handle == self.interrupt_handle: # check if signal handler is callable # to avoid 'int not callable' error (Python issue #23395) if callable(signal.getsignal(signal.SIGINT)): interrupt_main() elif handle == self.parent_handle: get_logger().warning("Parent appears to have exited, shutting down.") os._exit(1) elif result < 0: # wait failed, just give up and stop polling. warnings.warn("""Parent poll failed. If the frontend dies, the kernel may be left running. Please let us know about your system (bitness, Python, etc.) at ipython-dev@scipy.org""") return ipykernel-5.2.0/ipykernel/pickleutil.py000066400000000000000000000312471363550014400202510ustar00rootroot00000000000000# encoding: utf-8 """Pickle related utilities. Perhaps this should be called 'can'.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import warnings warnings.warn("ipykernel.pickleutil is deprecated. It has moved to ipyparallel.", DeprecationWarning) import copy import sys from types import FunctionType try: import cPickle as pickle except ImportError: import pickle from ipython_genutils import py3compat from ipython_genutils.importstring import import_item from ipython_genutils.py3compat import string_types, iteritems, buffer_to_bytes, buffer_to_bytes_py2 # This registers a hook when it's imported try: # available since ipyparallel 5.1.1 from ipyparallel.serialize import codeutil except ImportError: # Deprecated since ipykernel 4.3.1 from ipykernel import codeutil from traitlets.log import get_logger if py3compat.PY3: buffer = memoryview class_type = type else: from types import ClassType class_type = (type, ClassType) try: PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL except AttributeError: PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL def _get_cell_type(a=None): """the type of a closure cell doesn't seem to be importable, so just create one """ def inner(): return a return type(py3compat.get_closure(inner)[0]) cell_type = _get_cell_type() #------------------------------------------------------------------------------- # Functions #------------------------------------------------------------------------------- def interactive(f): """decorator for making functions appear as interactively defined. This results in the function being linked to the user_ns as globals() instead of the module globals(). """ # build new FunctionType, so it can have the right globals # interactive functions never have closures, that's kind of the point if isinstance(f, FunctionType): mainmod = __import__('__main__') f = FunctionType(f.__code__, mainmod.__dict__, f.__name__, f.__defaults__, ) # associate with __main__ for uncanning f.__module__ = '__main__' return f def use_dill(): """use dill to expand serialization support adds support for object methods and closures to serialization. """ # import dill causes most of the magic import dill # dill doesn't work with cPickle, # tell the two relevant modules to use plain pickle global pickle pickle = dill try: from ipykernel import serialize except ImportError: pass else: serialize.pickle = dill # disable special function handling, let dill take care of it can_map.pop(FunctionType, None) def use_cloudpickle(): """use cloudpickle to expand serialization support adds support for object methods and closures to serialization. """ import cloudpickle global pickle pickle = cloudpickle try: from ipykernel import serialize except ImportError: pass else: serialize.pickle = cloudpickle # disable special function handling, let cloudpickle take care of it can_map.pop(FunctionType, None) #------------------------------------------------------------------------------- # Classes #------------------------------------------------------------------------------- class CannedObject(object): def __init__(self, obj, keys=[], hook=None): """can an object for safe pickling Parameters ========== obj: The object to be canned keys: list (optional) list of attribute names that will be explicitly canned / uncanned hook: callable (optional) An optional extra callable, which can do additional processing of the uncanned object. large data may be offloaded into the buffers list, used for zero-copy transfers. """ self.keys = keys self.obj = copy.copy(obj) self.hook = can(hook) for key in keys: setattr(self.obj, key, can(getattr(obj, key))) self.buffers = [] def get_object(self, g=None): if g is None: g = {} obj = self.obj for key in self.keys: setattr(obj, key, uncan(getattr(obj, key), g)) if self.hook: self.hook = uncan(self.hook, g) self.hook(obj, g) return self.obj class Reference(CannedObject): """object for wrapping a remote reference by name.""" def __init__(self, name): if not isinstance(name, string_types): raise TypeError("illegal name: %r"%name) self.name = name self.buffers = [] def __repr__(self): return ""%self.name def get_object(self, g=None): if g is None: g = {} return eval(self.name, g) class CannedCell(CannedObject): """Can a closure cell""" def __init__(self, cell): self.cell_contents = can(cell.cell_contents) def get_object(self, g=None): cell_contents = uncan(self.cell_contents, g) def inner(): return cell_contents return py3compat.get_closure(inner)[0] class CannedFunction(CannedObject): def __init__(self, f): self._check_type(f) self.code = f.__code__ if f.__defaults__: self.defaults = [ can(fd) for fd in f.__defaults__ ] else: self.defaults = None closure = py3compat.get_closure(f) if closure: self.closure = tuple( can(cell) for cell in closure ) else: self.closure = None self.module = f.__module__ or '__main__' self.__name__ = f.__name__ self.buffers = [] def _check_type(self, obj): assert isinstance(obj, FunctionType), "Not a function type" def get_object(self, g=None): # try to load function back into its module: if not self.module.startswith('__'): __import__(self.module) g = sys.modules[self.module].__dict__ if g is None: g = {} if self.defaults: defaults = tuple(uncan(cfd, g) for cfd in self.defaults) else: defaults = None if self.closure: closure = tuple(uncan(cell, g) for cell in self.closure) else: closure = None newFunc = FunctionType(self.code, g, self.__name__, defaults, closure) return newFunc class CannedClass(CannedObject): def __init__(self, cls): self._check_type(cls) self.name = cls.__name__ self.old_style = not isinstance(cls, type) self._canned_dict = {} for k,v in cls.__dict__.items(): if k not in ('__weakref__', '__dict__'): self._canned_dict[k] = can(v) if self.old_style: mro = [] else: mro = cls.mro() self.parents = [ can(c) for c in mro[1:] ] self.buffers = [] def _check_type(self, obj): assert isinstance(obj, class_type), "Not a class type" def get_object(self, g=None): parents = tuple(uncan(p, g) for p in self.parents) return type(self.name, parents, uncan_dict(self._canned_dict, g=g)) class CannedArray(CannedObject): def __init__(self, obj): from numpy import ascontiguousarray self.shape = obj.shape self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str self.pickled = False if sum(obj.shape) == 0: self.pickled = True elif obj.dtype == 'O': # can't handle object dtype with buffer approach self.pickled = True elif obj.dtype.fields and any(dt == 'O' for dt,sz in obj.dtype.fields.values()): self.pickled = True if self.pickled: # just pickle it self.buffers = [pickle.dumps(obj, PICKLE_PROTOCOL)] else: # ensure contiguous obj = ascontiguousarray(obj, dtype=None) self.buffers = [buffer(obj)] def get_object(self, g=None): from numpy import frombuffer data = self.buffers[0] if self.pickled: # we just pickled it return pickle.loads(buffer_to_bytes_py2(data)) else: if not py3compat.PY3 and isinstance(data, memoryview): # frombuffer doesn't accept memoryviews on Python 2, # so cast to old-style buffer data = buffer(data.tobytes()) return frombuffer(data, dtype=self.dtype).reshape(self.shape) class CannedBytes(CannedObject): wrap = staticmethod(buffer_to_bytes) def __init__(self, obj): self.buffers = [obj] def get_object(self, g=None): data = self.buffers[0] return self.wrap(data) class CannedBuffer(CannedBytes): wrap = buffer class CannedMemoryView(CannedBytes): wrap = memoryview #------------------------------------------------------------------------------- # Functions #------------------------------------------------------------------------------- def _import_mapping(mapping, original=None): """import any string-keys in a type mapping """ log = get_logger() log.debug("Importing canning map") for key,value in list(mapping.items()): if isinstance(key, string_types): try: cls = import_item(key) except Exception: if original and key not in original: # only message on user-added classes log.error("canning class not importable: %r", key, exc_info=True) mapping.pop(key) else: mapping[cls] = mapping.pop(key) def istype(obj, check): """like isinstance(obj, check), but strict This won't catch subclasses. """ if isinstance(check, tuple): for cls in check: if type(obj) is cls: return True return False else: return type(obj) is check def can(obj): """prepare an object for pickling""" import_needed = False for cls,canner in iteritems(can_map): if isinstance(cls, string_types): import_needed = True break elif istype(obj, cls): return canner(obj) if import_needed: # perform can_map imports, then try again # this will usually only happen once _import_mapping(can_map, _original_can_map) return can(obj) return obj def can_class(obj): if isinstance(obj, class_type) and obj.__module__ == '__main__': return CannedClass(obj) else: return obj def can_dict(obj): """can the *values* of a dict""" if istype(obj, dict): newobj = {} for k, v in iteritems(obj): newobj[k] = can(v) return newobj else: return obj sequence_types = (list, tuple, set) def can_sequence(obj): """can the elements of a sequence""" if istype(obj, sequence_types): t = type(obj) return t([can(i) for i in obj]) else: return obj def uncan(obj, g=None): """invert canning""" import_needed = False for cls,uncanner in iteritems(uncan_map): if isinstance(cls, string_types): import_needed = True break elif isinstance(obj, cls): return uncanner(obj, g) if import_needed: # perform uncan_map imports, then try again # this will usually only happen once _import_mapping(uncan_map, _original_uncan_map) return uncan(obj, g) return obj def uncan_dict(obj, g=None): if istype(obj, dict): newobj = {} for k, v in iteritems(obj): newobj[k] = uncan(v,g) return newobj else: return obj def uncan_sequence(obj, g=None): if istype(obj, sequence_types): t = type(obj) return t([uncan(i,g) for i in obj]) else: return obj #------------------------------------------------------------------------------- # API dictionaries #------------------------------------------------------------------------------- # These dicts can be extended for custom serialization of new objects can_map = { 'numpy.ndarray' : CannedArray, FunctionType : CannedFunction, bytes : CannedBytes, memoryview : CannedMemoryView, cell_type : CannedCell, class_type : can_class, } if buffer is not memoryview: can_map[buffer] = CannedBuffer uncan_map = { CannedObject : lambda obj, g: obj.get_object(g), dict : uncan_dict, } # for use in _import_mapping: _original_can_map = can_map.copy() _original_uncan_map = uncan_map.copy() ipykernel-5.2.0/ipykernel/pylab/000077500000000000000000000000001363550014400166325ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/pylab/__init__.py000066400000000000000000000000001363550014400207310ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/pylab/backend_inline.py000066400000000000000000000165041363550014400221370ustar00rootroot00000000000000"""A matplotlib backend for publishing figures via display_data""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import matplotlib from matplotlib.backends.backend_agg import ( new_figure_manager, FigureCanvasAgg, new_figure_manager_given_figure, ) # analysis: ignore from matplotlib import colors from matplotlib._pylab_helpers import Gcf from IPython.core.getipython import get_ipython from IPython.core.display import display from .config import InlineBackend def show(close=None, block=None): """Show all figures as SVG/PNG payloads sent to the IPython clients. Parameters ---------- close : bool, optional If true, a ``plt.close('all')`` call is automatically issued after sending all the figures. If this is set, the figures will entirely removed from the internal list of figures. block : Not used. The `block` parameter is a Matplotlib experimental parameter. We accept it in the function signature for compatibility with other backends. """ if close is None: close = InlineBackend.instance().close_figures try: for figure_manager in Gcf.get_all_fig_managers(): display( figure_manager.canvas.figure, metadata=_fetch_figure_metadata(figure_manager.canvas.figure) ) finally: show._to_draw = [] # only call close('all') if any to close # close triggers gc.collect, which can be slow if close and Gcf.get_all_fig_managers(): matplotlib.pyplot.close('all') # This flag will be reset by draw_if_interactive when called show._draw_called = False # list of figures to draw when flush_figures is called show._to_draw = [] def draw_if_interactive(): """ Is called after every pylab drawing command """ # signal that the current active figure should be sent at the end of # execution. Also sets the _draw_called flag, signaling that there will be # something to send. At the end of the code execution, a separate call to # flush_figures() will act upon these values manager = Gcf.get_active() if manager is None: return fig = manager.canvas.figure # Hack: matplotlib FigureManager objects in interacive backends (at least # in some of them) monkeypatch the figure object and add a .show() method # to it. This applies the same monkeypatch in order to support user code # that might expect `.show()` to be part of the official API of figure # objects. # For further reference: # https://github.com/ipython/ipython/issues/1612 # https://github.com/matplotlib/matplotlib/issues/835 if not hasattr(fig, 'show'): # Queue up `fig` for display fig.show = lambda *a: display(fig, metadata=_fetch_figure_metadata(fig)) # If matplotlib was manually set to non-interactive mode, this function # should be a no-op (otherwise we'll generate duplicate plots, since a user # who set ioff() manually expects to make separate draw/show calls). if not matplotlib.is_interactive(): return # ensure current figure will be drawn, and each subsequent call # of draw_if_interactive() moves the active figure to ensure it is # drawn last try: show._to_draw.remove(fig) except ValueError: # ensure it only appears in the draw list once pass # Queue up the figure for drawing in next show() call show._to_draw.append(fig) show._draw_called = True def flush_figures(): """Send all figures that changed This is meant to be called automatically and will call show() if, during prior code execution, there had been any calls to draw_if_interactive. This function is meant to be used as a post_execute callback in IPython, so user-caused errors are handled with showtraceback() instead of being allowed to raise. If this function is not called from within IPython, then these exceptions will raise. """ if not show._draw_called: return if InlineBackend.instance().close_figures: # ignore the tracking, just draw and close all figures try: return show(True) except Exception as e: # safely show traceback if in IPython, else raise ip = get_ipython() if ip is None: raise e else: ip.showtraceback() return try: # exclude any figures that were closed: active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()]) for fig in [ fig for fig in show._to_draw if fig in active ]: try: display(fig, metadata=_fetch_figure_metadata(fig)) except Exception as e: # safely show traceback if in IPython, else raise ip = get_ipython() if ip is None: raise e else: ip.showtraceback() return finally: # clear flags for next round show._to_draw = [] show._draw_called = False # Changes to matplotlib in version 1.2 requires a mpl backend to supply a default # figurecanvas. This is set here to a Agg canvas # See https://github.com/matplotlib/matplotlib/pull/1125 FigureCanvas = FigureCanvasAgg def _enable_matplotlib_integration(): """Enable extra IPython matplotlib integration when we are loaded as the matplotlib backend.""" from matplotlib import get_backend ip = get_ipython() backend = get_backend() if ip and backend == 'module://%s' % __name__: from IPython.core.pylabtools import configure_inline_support, activate_matplotlib try: activate_matplotlib(backend) configure_inline_support(ip, backend) except (ImportError, AttributeError): # bugs may cause a circular import on Python 2 def configure_once(*args): activate_matplotlib(backend) configure_inline_support(ip, backend) ip.events.unregister('post_run_cell', configure_once) ip.events.register('post_run_cell', configure_once) _enable_matplotlib_integration() def _fetch_figure_metadata(fig): """Get some metadata to help with displaying a figure.""" # determine if a background is needed for legibility if _is_transparent(fig.get_facecolor()): # the background is transparent ticksLight = _is_light([label.get_color() for axes in fig.axes for axis in (axes.xaxis, axes.yaxis) for label in axis.get_ticklabels()]) if ticksLight.size and (ticksLight == ticksLight[0]).all(): # there are one or more tick labels, all with the same lightness return {'needs_background': 'dark' if ticksLight[0] else 'light'} return None def _is_light(color): """Determines if a color (or each of a sequence of colors) is light (as opposed to dark). Based on ITU BT.601 luminance formula (see https://stackoverflow.com/a/596241).""" rgbaArr = colors.to_rgba_array(color) return rgbaArr[:,:3].dot((.299, .587, .114)) > .5 def _is_transparent(color): """Determine transparency from alpha.""" rgba = colors.to_rgba(color) return rgba[3] < .5 ipykernel-5.2.0/ipykernel/pylab/config.py000066400000000000000000000106051363550014400204530ustar00rootroot00000000000000"""Configurable for configuring the IPython inline backend This module does not import anything from matplotlib. """ #----------------------------------------------------------------------------- # Copyright (C) 2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from traitlets.config.configurable import SingletonConfigurable from traitlets import ( Dict, Instance, Set, Bool, TraitError, Unicode ) #----------------------------------------------------------------------------- # Configurable for inline backend options #----------------------------------------------------------------------------- def pil_available(): """Test if PIL/Pillow is available""" out = False try: from PIL import Image out = True except: pass return out # inherit from InlineBackendConfig for deprecation purposes class InlineBackendConfig(SingletonConfigurable): pass class InlineBackend(InlineBackendConfig): """An object to store configuration of the inline backend.""" # The typical default figure size is too large for inline use, # so we shrink the figure size to 6x4, and tweak fonts to # make that fit. rc = Dict({'figure.figsize': (6.0,4.0), # play nicely with white background in the Qt and notebook frontend 'figure.facecolor': (1,1,1,0), 'figure.edgecolor': (1,1,1,0), # 12pt labels get cutoff on 6x4 logplots, so use 10pt. 'font.size': 10, # 72 dpi matches SVG/qtconsole # this only affects PNG export, as SVG has no dpi setting 'figure.dpi': 72, # 10pt still needs a little more room on the xlabel: 'figure.subplot.bottom' : .125 }, help="""Subset of matplotlib rcParams that should be different for the inline backend.""" ).tag(config=True) figure_formats = Set({'png'}, help="""A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.""").tag(config=True) def _update_figure_formatters(self): if self.shell is not None: from IPython.core.pylabtools import select_figure_formats select_figure_formats(self.shell, self.figure_formats, **self.print_figure_kwargs) def _figure_formats_changed(self, name, old, new): if 'jpg' in new or 'jpeg' in new: if not pil_available(): raise TraitError("Requires PIL/Pillow for JPG figures") self._update_figure_formatters() figure_format = Unicode(help="""The figure format to enable (deprecated use `figure_formats` instead)""").tag(config=True) def _figure_format_changed(self, name, old, new): if new: self.figure_formats = {new} print_figure_kwargs = Dict({'bbox_inches' : 'tight'}, help="""Extra kwargs to be passed to fig.canvas.print_figure. Logical examples include: bbox_inches, quality (for jpeg figures), etc. """ ).tag(config=True) _print_figure_kwargs_changed = _update_figure_formatters close_figures = Bool(True, help="""Close all figures at the end of each cell. When True, ensures that each cell starts with no active figures, but it also means that one must keep track of references in order to edit or redraw figures in subsequent cells. This mode is ideal for the notebook, where residual plots from other cells might be surprising. When False, one must call figure() to create new figures. This means that gcf() and getfigs() can reference figures created in other cells, and the active figure can continue to be edited with pylab/pyplot methods that reference the current active figure. This mode facilitates iterative editing of figures, and behaves most consistently with other matplotlib backends, but figure barriers between cells must be explicit. """).tag(config=True) shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True) ipykernel-5.2.0/ipykernel/resources/000077500000000000000000000000001363550014400175355ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/resources/logo-32x32.png000066400000000000000000000020741363550014400217650ustar00rootroot00000000000000PNG  IHDR szzbKGD pHYs}}tIME "4kIDATXW]h\E=nmXBZ "?OUSQ"VI[EP-PP ZkXJcQ1anߦW7s9372ʈ{ d "#&ғ*;=TvuP1"AH^2:rdxKE)>.c)=L%BwH AYsp͟I{Pij,:#BǙ4dSvFW2(Yt2YGT$xҟobcXr|'cGЕ:@)<`QOw\Ḩ3Z1@:`)wZ*b 3ߍz:%2 îsmOFO5Ny]kG]{4y ^lSpn<@?.n@[4I}by1o Tb>IENDB`ipykernel-5.2.0/ipykernel/resources/logo-64x64.png000066400000000000000000000042041363550014400217740ustar00rootroot00000000000000PNG  IHDR@@iqbKGD pHYsEtIME ")IDATx[}lUg=" m:vU6u6D.u: n3deiAVV(t.3#Qh7=?s9v;ƾm޷=:o15t\6>$HR$9dIN'9Hb@ (Q6ɣ)S/U )B8s-\5{mX]ֽaUi7$G^ y C}=4?D?Y<- Yώx @\ J^P"[Hp 0 ֦A񫥩_L/'ڈOFF>v`MoT$No kuN=v`A]]{wiB #DŇcIR uQH.p4ZX cI!*5y7<`ڷB_@$ ͋J?֌EpF]`xtf;C/ ØhXKL-@w< P[`V{P#83a`5p'ڠ )L&eFӴh߳PO@ :O5[[Xw`T߱G$i˾%T٨ !:'%Wr  }2 %'BM[:[TdĒ|!!ge0ϵU@Vn\kA Nu`#pֿ$;@M,p z{;FR23 6Q| !5OhH!J|/AB|E[K33iߞp%H@/G3՗+kxFH-\(YQ ^ +#8<FWP5VH- !("YK;~3 pFB+lrpb\\5vc?WH.=(`;H_cFaG%Q<[)3z$=E(oǡ0xTv%uDW'TjA1;Duh@Ïo3R)@,#I-˞`6Fj5>Pp3uY5H yE \`)o޼gۖbM6(EBESD!#t>'Ksrh޼C1;ϑEqta Dvhx׏o-[W\g-jא=   D*pxۗ9Z8g#9:SPeD 81>PoN#HnZ뵦;,J+^G(d-"sj:@J I}Uq6 qF}fk1c2yޭ%nFY;"5D*v7kna&ÁilDP#3pmWG$:6|&@aÀ4L4oJ-"]WIME4Z A RpolhR?Kt`>6Zbx!(E)1u ?tۓZ|ߎ[ Tαs̷ÜZ@{2ΞIiXdcC@v˲ro\hXf|O޹o(+h (C /&9`N}DIU1< 4u5D*q Dii:Q4-',#Yx[yX9NY/_2vrPeI}qG{ . 'HwEzRJzԧu1;|>,Jxґw h12l9wF!; 2+s~67N4܋Q}?K58 { -yf9;buz^ɖn)AI12U>p /c|kP0^,h` 2Q" "AyR`jL15&ynIENDB`ipykernel-5.2.0/ipykernel/serialize.py000066400000000000000000000132671363550014400200750ustar00rootroot00000000000000"""serialization utilities for apply messages""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import warnings warnings.warn("ipykernel.serialize is deprecated. It has moved to ipyparallel.serialize", DeprecationWarning) import pickle from itertools import chain from ipykernel.pickleutil import ( can, uncan, can_sequence, uncan_sequence, CannedObject, istype, sequence_types, PICKLE_PROTOCOL, ) from jupyter_client.session import MAX_ITEMS, MAX_BYTES #----------------------------------------------------------------------------- # Serialization Functions #----------------------------------------------------------------------------- def _extract_buffers(obj, threshold=MAX_BYTES): """extract buffers larger than a certain threshold""" buffers = [] if isinstance(obj, CannedObject) and obj.buffers: for i,buf in enumerate(obj.buffers): if len(buf) > threshold: # buffer larger than threshold, prevent pickling obj.buffers[i] = None buffers.append(buf) # buffer too small for separate send, coerce to bytes # because pickling buffer objects just results in broken pointers elif isinstance(buf, memoryview): obj.buffers[i] = buf.tobytes() return buffers def _restore_buffers(obj, buffers): """restore buffers extracted by """ if isinstance(obj, CannedObject) and obj.buffers: for i,buf in enumerate(obj.buffers): if buf is None: obj.buffers[i] = buffers.pop(0) def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS): """Serialize an object into a list of sendable buffers. Parameters ---------- obj : object The object to be serialized buffer_threshold : int The threshold (in bytes) for pulling out data buffers to avoid pickling them. item_threshold : int The maximum number of items over which canning will iterate. Containers (lists, dicts) larger than this will be pickled without introspection. Returns ------- [bufs] : list of buffers representing the serialized object. """ buffers = [] if istype(obj, sequence_types) and len(obj) < item_threshold: cobj = can_sequence(obj) for c in cobj: buffers.extend(_extract_buffers(c, buffer_threshold)) elif istype(obj, dict) and len(obj) < item_threshold: cobj = {} for k in sorted(obj): c = can(obj[k]) buffers.extend(_extract_buffers(c, buffer_threshold)) cobj[k] = c else: cobj = can(obj) buffers.extend(_extract_buffers(cobj, buffer_threshold)) buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL)) return buffers def deserialize_object(buffers, g=None): """reconstruct an object serialized by serialize_object from data buffers. Parameters ---------- bufs : list of buffers/bytes g : globals to be used when uncanning Returns ------- (newobj, bufs) : unpacked object, and the list of remaining unused buffers. """ bufs = list(buffers) pobj = bufs.pop(0) canned = pickle.loads(pobj) if istype(canned, sequence_types) and len(canned) < MAX_ITEMS: for c in canned: _restore_buffers(c, bufs) newobj = uncan_sequence(canned, g) elif istype(canned, dict) and len(canned) < MAX_ITEMS: newobj = {} for k in sorted(canned): c = canned[k] _restore_buffers(c, bufs) newobj[k] = uncan(c, g) else: _restore_buffers(canned, bufs) newobj = uncan(canned, g) return newobj, bufs def pack_apply_message(f, args, kwargs, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS): """pack up a function, args, and kwargs to be sent over the wire Each element of args/kwargs will be canned for special treatment, but inspection will not go any deeper than that. Any object whose data is larger than `threshold` will not have their data copied (only numpy arrays and bytes/buffers support zero-copy) Message will be a list of bytes/buffers of the format: [ cf, pinfo, , ] With length at least two + len(args) + len(kwargs) """ arg_bufs = list(chain.from_iterable( serialize_object(arg, buffer_threshold, item_threshold) for arg in args)) kw_keys = sorted(kwargs.keys()) kwarg_bufs = list(chain.from_iterable( serialize_object(kwargs[key], buffer_threshold, item_threshold) for key in kw_keys)) info = dict(nargs=len(args), narg_bufs=len(arg_bufs), kw_keys=kw_keys) msg = [pickle.dumps(can(f), PICKLE_PROTOCOL)] msg.append(pickle.dumps(info, PICKLE_PROTOCOL)) msg.extend(arg_bufs) msg.extend(kwarg_bufs) return msg def unpack_apply_message(bufs, g=None, copy=True): """unpack f,args,kwargs from buffers packed by pack_apply_message() Returns: original f,args,kwargs""" bufs = list(bufs) # allow us to pop assert len(bufs) >= 2, "not enough buffers!" pf = bufs.pop(0) f = uncan(pickle.loads(pf), g) pinfo = bufs.pop(0) info = pickle.loads(pinfo) arg_bufs, kwarg_bufs = bufs[:info['narg_bufs']], bufs[info['narg_bufs']:] args = [] for i in range(info['nargs']): arg, arg_bufs = deserialize_object(arg_bufs, g) args.append(arg) args = tuple(args) assert not arg_bufs, "Shouldn't be any arg bufs left over" kwargs = {} for key in info['kw_keys']: kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g) kwargs[key] = kwarg assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over" return f,args,kwargs ipykernel-5.2.0/ipykernel/tests/000077500000000000000000000000001363550014400166655ustar00rootroot00000000000000ipykernel-5.2.0/ipykernel/tests/__init__.py000066400000000000000000000017461363550014400210060ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import os import shutil import sys import tempfile try: from unittest.mock import patch except ImportError: from mock import patch from jupyter_core import paths as jpaths from IPython import paths as ipaths from ipykernel.kernelspec import install pjoin = os.path.join tmp = None patchers = [] def setup(): """setup temporary env for tests""" global tmp tmp = tempfile.mkdtemp() patchers[:] = [ patch.dict(os.environ, { 'HOME': tmp, # Let tests work with --user install when HOME is changed: 'PYTHONPATH': os.pathsep.join(sys.path), }), ] for p in patchers: p.start() # install IPython in the temp home: install(user=True) def teardown(): for p in patchers: p.stop() try: shutil.rmtree(tmp) except (OSError, IOError): # no such file pass ipykernel-5.2.0/ipykernel/tests/_asyncio_utils.py000066400000000000000000000005531363550014400222660ustar00rootroot00000000000000"""test utilities that use async/await syntax a separate file to avoid syntax errors on Python 2 """ import asyncio def async_func(): """Simple async function to schedule a task on the current eventloop""" loop = asyncio.get_event_loop() assert loop.is_running() async def task(): await asyncio.sleep(1) loop.create_task(task()) ipykernel-5.2.0/ipykernel/tests/test_async.py000066400000000000000000000040621363550014400214150ustar00rootroot00000000000000"""Test async/await integration""" from distutils.version import LooseVersion as V import sys import pytest import IPython from .utils import execute, flush_channels, start_new_kernel, TIMEOUT from .test_message_spec import validate_message KC = KM = None def setup_function(): """start the global kernel (if it isn't running) and return its client""" global KM, KC KM, KC = start_new_kernel() flush_channels(KC) def teardown_function(): KC.stop_channels() KM.shutdown_kernel(now=True) skip_without_async = pytest.mark.skipif( sys.version_info < (3, 5) or V(IPython.__version__) < V("7.0"), reason="IPython >=7 with async/await required", ) @skip_without_async def test_async_await(): flush_channels(KC) msg_id, content = execute("import asyncio; await asyncio.sleep(0.1)", KC) assert content["status"] == "ok", content @pytest.mark.parametrize("asynclib", ["asyncio", "trio", "curio"]) @skip_without_async def test_async_interrupt(asynclib, request): try: __import__(asynclib) except ImportError: pytest.skip("Requires %s" % asynclib) request.addfinalizer(lambda: execute("%autoawait asyncio", KC)) flush_channels(KC) msg_id, content = execute("%autoawait " + asynclib, KC) assert content["status"] == "ok", content flush_channels(KC) msg_id = KC.execute( "print('begin'); import {0}; await {0}.sleep(5)".format(asynclib) ) busy = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(busy, "status", msg_id) assert busy["content"]["execution_state"] == "busy" echo = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(echo, "execute_input") stream = KC.get_iopub_msg(timeout=TIMEOUT) # wait for the stream output to be sure kernel is in the async block validate_message(stream, "stream") assert stream["content"]["text"] == "begin\n" KM.interrupt_kernel() reply = KC.get_shell_msg()["content"] assert reply["status"] == "error", reply assert reply["ename"] in {"CancelledError", "KeyboardInterrupt"} flush_channels(KC) ipykernel-5.2.0/ipykernel/tests/test_connect.py000066400000000000000000000101501363550014400217240ustar00rootroot00000000000000"""Tests for kernel connection utilities""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import errno import json import os from unittest.mock import patch import pytest import zmq from traitlets.config import Config from ipython_genutils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory from ipython_genutils.py3compat import str_to_bytes from ipykernel import connect from ipykernel.kernelapp import IPKernelApp sample_info = dict(ip='1.2.3.4', transport='ipc', shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5, key=b'abc123', signature_scheme='hmac-md5', ) class DummyKernelApp(IPKernelApp): def _default_shell_port(self): return 0 def initialize(self, argv=[]): self.init_profile_dir() self.init_connection_file() def test_get_connection_file(): cfg = Config() with TemporaryWorkingDirectory() as d: cfg.ProfileDir.location = d cf = 'kernel.json' app = DummyKernelApp(config=cfg, connection_file=cf) app.initialize() profile_cf = os.path.join(app.connection_dir, cf) assert profile_cf == app.abs_connection_file with open(profile_cf, 'w') as f: f.write("{}") assert os.path.exists(profile_cf) assert connect.get_connection_file(app) == profile_cf app.connection_file = cf assert connect.get_connection_file(app) == profile_cf def test_get_connection_info(): with TemporaryDirectory() as d: cf = os.path.join(d, 'kernel.json') connect.write_connection_file(cf, **sample_info) json_info = connect.get_connection_info(cf) info = connect.get_connection_info(cf, unpack=True) assert isinstance(json_info, str) sub_info = {k:v for k,v in info.items() if k in sample_info} assert sub_info == sample_info info2 = json.loads(json_info) info2['key'] = str_to_bytes(info2['key']) sub_info2 = {k:v for k,v in info.items() if k in sample_info} assert sub_info2 == sample_info def test_port_bind_failure_raises(request): cfg = Config() with TemporaryWorkingDirectory() as d: cfg.ProfileDir.location = d cf = 'kernel.json' app = DummyKernelApp(config=cfg, connection_file=cf) request.addfinalizer(app.close) app.initialize() with patch.object(app, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = zmq.ZMQError(-100, "fails for unknown error types") with pytest.raises(zmq.ZMQError): app.init_sockets() assert mock_try_bind.call_count == 1 def test_port_bind_failure_recovery(request): try: errno.WSAEADDRINUSE except AttributeError: # Fake windows address in-use code p = patch.object(errno, 'WSAEADDRINUSE', 12345, create=True) p.start() request.addfinalizer(p.stop) cfg = Config() with TemporaryWorkingDirectory() as d: cfg.ProfileDir.location = d cf = 'kernel.json' app = DummyKernelApp(config=cfg, connection_file=cf) request.addfinalizer(app.close) app.initialize() with patch.object(app, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = [ zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind unix"), zmq.ZMQError(errno.WSAEADDRINUSE, "fails for non-bind windows") ] + [0] * 100 # Shouldn't raise anything as retries will kick in app.init_sockets() def test_port_bind_failure_gives_up_retries(request): cfg = Config() with TemporaryWorkingDirectory() as d: cfg.ProfileDir.location = d cf = 'kernel.json' app = DummyKernelApp(config=cfg, connection_file=cf) request.addfinalizer(app.close) app.initialize() with patch.object(app, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind") with pytest.raises(zmq.ZMQError): app.init_sockets() assert mock_try_bind.call_count == 100 ipykernel-5.2.0/ipykernel/tests/test_embed_kernel.py000066400000000000000000000122501363550014400227120ustar00rootroot00000000000000"""test IPython.embed_kernel()""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import os import sys import time import json from contextlib import contextmanager from subprocess import Popen, PIPE from flaky import flaky from jupyter_client import BlockingKernelClient from jupyter_core import paths from ipython_genutils import py3compat from ipython_genutils.py3compat import unicode_type SETUP_TIMEOUT = 60 TIMEOUT = 15 @contextmanager def setup_kernel(cmd): """start an embedded kernel in a subprocess, and wait for it to be ready Returns ------- kernel_manager: connected KernelManager instance """ def connection_file_ready(connection_file): """Check if connection_file is a readable json file.""" if not os.path.exists(connection_file): return False try: with open(connection_file) as f: json.load(f) return True except ValueError: return False kernel = Popen([sys.executable, '-c', cmd], stdout=PIPE, stderr=PIPE) try: connection_file = os.path.join( paths.jupyter_runtime_dir(), 'kernel-%i.json' % kernel.pid, ) # wait for connection file to exist, timeout after 5s tic = time.time() while not connection_file_ready(connection_file) \ and kernel.poll() is None \ and time.time() < tic + SETUP_TIMEOUT: time.sleep(0.1) # Wait 100ms for the writing to finish time.sleep(0.1) if kernel.poll() is not None: o,e = kernel.communicate() e = py3compat.cast_unicode(e) raise IOError("Kernel failed to start:\n%s" % e) if not os.path.exists(connection_file): if kernel.poll() is None: kernel.terminate() raise IOError("Connection file %r never arrived" % connection_file) client = BlockingKernelClient(connection_file=connection_file) client.load_connection_file() client.start_channels() client.wait_for_ready() try: yield client finally: client.stop_channels() finally: kernel.terminate() @flaky(max_runs=3) def test_embed_kernel_basic(): """IPython.embed_kernel() is basically functional""" cmd = '\n'.join([ 'from IPython import embed_kernel', 'def go():', ' a=5', ' b="hi there"', ' embed_kernel()', 'go()', '', ]) with setup_kernel(cmd) as client: # oinfo a (int) msg_id = client.inspect('a') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['found'] msg_id = client.execute("c=a*2") msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['status'] == u'ok' # oinfo c (should be 10) msg_id = client.inspect('c') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert '10' in text @flaky(max_runs=3) def test_embed_kernel_namespace(): """IPython.embed_kernel() inherits calling namespace""" cmd = '\n'.join([ 'from IPython import embed_kernel', 'def go():', ' a=5', ' b="hi there"', ' embed_kernel()', 'go()', '', ]) with setup_kernel(cmd) as client: # oinfo a (int) msg_id = client.inspect('a') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert u'5' in text # oinfo b (str) msg_id = client.inspect('b') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert u'hi there' in text # oinfo c (undefined) msg_id = client.inspect('c') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert not content['found'] @flaky(max_runs=3) def test_embed_kernel_reentrant(): """IPython.embed_kernel() can be called multiple times""" cmd = '\n'.join([ 'from IPython import embed_kernel', 'count = 0', 'def go():', ' global count', ' embed_kernel()', ' count = count + 1', '', 'while True:' ' go()', '', ]) with setup_kernel(cmd) as client: for i in range(5): msg_id = client.inspect('count') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert unicode_type(i) in text # exit from embed_kernel client.execute("get_ipython().exit_now = True") msg = client.get_shell_msg(block=True, timeout=TIMEOUT) time.sleep(0.2) ipykernel-5.2.0/ipykernel/tests/test_eventloop.py000066400000000000000000000020371363550014400223130ustar00rootroot00000000000000"""Test eventloop integration""" import sys import pytest import tornado from .utils import flush_channels, start_new_kernel, execute KC = KM = None def setup(): """start the global kernel (if it isn't running) and return its client""" global KM, KC KM, KC = start_new_kernel() flush_channels(KC) def teardown(): KC.stop_channels() KM.shutdown_kernel(now=True) async_code = """ from ipykernel.tests._asyncio_utils import async_func async_func() """ @pytest.mark.skipif(sys.version_info < (3, 5), reason="async/await syntax required") @pytest.mark.skipif(tornado.version_info < (5,), reason="only relevant on tornado 5") def test_asyncio_interrupt(): flush_channels(KC) msg_id, content = execute('%gui asyncio', KC) assert content['status'] == 'ok', content flush_channels(KC) msg_id, content = execute(async_code, KC) assert content['status'] == 'ok', content KM.interrupt_kernel() flush_channels(KC) msg_id, content = execute(async_code, KC) assert content['status'] == 'ok' ipykernel-5.2.0/ipykernel/tests/test_heartbeat.py000066400000000000000000000035401363550014400222370ustar00rootroot00000000000000"""Tests for heartbeat thread""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import errno from unittest.mock import patch import pytest import zmq from ipykernel.heartbeat import Heartbeat def test_port_bind_failure_raises(): heart = Heartbeat(None) with patch.object(heart, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = zmq.ZMQError(-100, "fails for unknown error types") with pytest.raises(zmq.ZMQError): heart._bind_socket() assert mock_try_bind.call_count == 1 def test_port_bind_success(): heart = Heartbeat(None) with patch.object(heart, '_try_bind_socket') as mock_try_bind: heart._bind_socket() assert mock_try_bind.call_count == 1 def test_port_bind_failure_recovery(): try: errno.WSAEADDRINUSE except AttributeError: # Fake windows address in-use code errno.WSAEADDRINUSE = 12345 try: heart = Heartbeat(None) with patch.object(heart, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = [ zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind unix"), zmq.ZMQError(errno.WSAEADDRINUSE, "fails for non-bind windows") ] + [0] * 100 # Shouldn't raise anything as retries will kick in heart._bind_socket() finally: # Cleanup fake assignment if errno.WSAEADDRINUSE == 12345: del errno.WSAEADDRINUSE def test_port_bind_failure_gives_up_retries(): heart = Heartbeat(None) with patch.object(heart, '_try_bind_socket') as mock_try_bind: mock_try_bind.side_effect = zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind") with pytest.raises(zmq.ZMQError): heart._bind_socket() assert mock_try_bind.call_count == 100 ipykernel-5.2.0/ipykernel/tests/test_io.py000066400000000000000000000020751363550014400207110ustar00rootroot00000000000000"""Test IO capturing functionality""" import io import zmq from jupyter_client.session import Session from ipykernel.iostream import IOPubThread, OutStream import nose.tools as nt def test_io_api(): """Test that wrapped stdout has the same API as a normal TextIO object""" session = Session() ctx = zmq.Context() pub = ctx.socket(zmq.PUB) thread = IOPubThread(pub) thread.start() stream = OutStream(session, thread, 'stdout') # cleanup unused zmq objects before we start testing thread.stop() thread.close() ctx.term() assert stream.errors is None assert not stream.isatty() with nt.assert_raises(io.UnsupportedOperation): stream.detach() with nt.assert_raises(io.UnsupportedOperation): next(stream) with nt.assert_raises(io.UnsupportedOperation): stream.read() with nt.assert_raises(io.UnsupportedOperation): stream.readline() with nt.assert_raises(io.UnsupportedOperation): stream.seek() with nt.assert_raises(io.UnsupportedOperation): stream.tell() ipykernel-5.2.0/ipykernel/tests/test_jsonutil.py000066400000000000000000000062701363550014400221520ustar00rootroot00000000000000# coding: utf-8 """Test suite for our JSON utilities.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from binascii import a2b_base64 import json from datetime import datetime import numbers import nose.tools as nt from .. import jsonutil from ..jsonutil import json_clean, encode_images from ipython_genutils.py3compat import unicode_to_str class MyInt(object): def __int__(self): return 389 numbers.Integral.register(MyInt) class MyFloat(object): def __float__(self): return 3.14 numbers.Real.register(MyFloat) def test(): # list of input/expected output. Use None for the expected output if it # can be the same as the input. pairs = [(1, None), # start with scalars (1.0, None), ('a', None), (True, None), (False, None), (None, None), # Containers ([1, 2], None), ((1, 2), [1, 2]), (set([1, 2]), [1, 2]), (dict(x=1), None), ({'x': 1, 'y':[1,2,3], '1':'int'}, None), # More exotic objects ((x for x in range(3)), [0, 1, 2]), (iter([1, 2]), [1, 2]), (datetime(1991, 7, 3, 12, 00), "1991-07-03T12:00:00.000000"), (MyFloat(), 3.14), (MyInt(), 389) ] for val, jval in pairs: if jval is None: jval = val out = json_clean(val) # validate our cleanup assert out == jval # and ensure that what we return, indeed encodes cleanly json.loads(json.dumps(out)) def test_encode_images(): # invalid data, but the header and footer are from real files pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82' jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9' pdfdata = b'%PDF-1.\ntrailer<>]>>>>>>' bindata = b'\xff\xff\xff\xff' fmt = { 'image/png' : pngdata, 'image/jpeg' : jpegdata, 'application/pdf' : pdfdata, 'application/unrecognized': bindata, } encoded = json_clean(encode_images(fmt)) for key, value in fmt.items(): # encoded has unicode, want bytes decoded = a2b_base64(encoded[key]) assert decoded == value encoded2 = json_clean(encode_images(encoded)) assert encoded == encoded2 # test that we don't double-encode base64 str b64_str = {} for key, encoded in encoded.items(): b64_str[key] = unicode_to_str(encoded) encoded3 = json_clean(encode_images(b64_str)) assert encoded3 == b64_str for key, value in fmt.items(): decoded = a2b_base64(encoded3[key]) assert decoded == value def test_lambda(): with nt.assert_raises(ValueError): json_clean(lambda : 1) def test_exception(): bad_dicts = [{1:'number', '1':'string'}, {True:'bool', 'True':'string'}, ] for d in bad_dicts: nt.assert_raises(ValueError, json_clean, d) def test_unicode_dict(): data = {u'üniço∂e': u'üniço∂e'} clean = jsonutil.json_clean(data) assert data == clean ipykernel-5.2.0/ipykernel/tests/test_kernel.py000066400000000000000000000250711363550014400215630ustar00rootroot00000000000000# coding: utf-8 """test the IPython Kernel""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import ast import io import os.path import sys import time import nose.tools as nt from flaky import flaky from IPython.testing import decorators as dec, tools as tt from ipython_genutils import py3compat from IPython.paths import locate_profile from ipython_genutils.tempdir import TemporaryDirectory from .utils import ( new_kernel, kernel, TIMEOUT, assemble_output, execute, flush_channels, wait_for_idle, ) def _check_master(kc, expected=True, stream="stdout"): execute(kc=kc, code="import sys") flush_channels(kc) msg_id, content = execute(kc=kc, code="print (sys.%s._is_master_process())" % stream) stdout, stderr = assemble_output(kc.iopub_channel) assert stdout.strip() == repr(expected) def _check_status(content): """If status=error, show the traceback""" if content['status'] == 'error': assert False, ''.join(['\n'] + content['traceback']) # printing tests def test_simple_print(): """simple print statement in kernel""" with kernel() as kc: iopub = kc.iopub_channel msg_id, content = execute(kc=kc, code="print ('hi')") stdout, stderr = assemble_output(iopub) assert stdout == 'hi\n' assert stderr == '' _check_master(kc, expected=True) def test_sys_path(): """test that sys.path doesn't get messed up by default""" with kernel() as kc: msg_id, content = execute(kc=kc, code="import sys; print(repr(sys.path))") stdout, stderr = assemble_output(kc.iopub_channel) # for error-output on failure sys.stderr.write(stderr) sys_path = ast.literal_eval(stdout.strip()) assert '' in sys_path def test_sys_path_profile_dir(): """test that sys.path doesn't get messed up when `--profile-dir` is specified""" with new_kernel(['--profile-dir', locate_profile('default')]) as kc: msg_id, content = execute(kc=kc, code="import sys; print(repr(sys.path))") stdout, stderr = assemble_output(kc.iopub_channel) # for error-output on failure sys.stderr.write(stderr) sys_path = ast.literal_eval(stdout.strip()) assert '' in sys_path @flaky(max_runs=3) @dec.skipif(sys.platform == 'win32', "subprocess prints fail on Windows") def test_subprocess_print(): """printing from forked mp.Process""" with new_kernel() as kc: iopub = kc.iopub_channel _check_master(kc, expected=True) flush_channels(kc) np = 5 code = '\n'.join([ "from __future__ import print_function", "import time", "import multiprocessing as mp", "pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % np, "for p in pool: p.start()", "for p in pool: p.join()", "time.sleep(0.5)," ]) msg_id, content = execute(kc=kc, code=code) stdout, stderr = assemble_output(iopub) nt.assert_equal(stdout.count("hello"), np, stdout) for n in range(np): nt.assert_equal(stdout.count(str(n)), 1, stdout) assert stderr == '' _check_master(kc, expected=True) _check_master(kc, expected=True, stream="stderr") @flaky(max_runs=3) def test_subprocess_noprint(): """mp.Process without print doesn't trigger iostream mp_mode""" with kernel() as kc: iopub = kc.iopub_channel np = 5 code = '\n'.join([ "import multiprocessing as mp", "pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % np, "for p in pool: p.start()", "for p in pool: p.join()" ]) msg_id, content = execute(kc=kc, code=code) stdout, stderr = assemble_output(iopub) assert stdout == '' assert stderr == '' _check_master(kc, expected=True) _check_master(kc, expected=True, stream="stderr") @flaky(max_runs=3) @dec.skipif(sys.platform == 'win32', "subprocess prints fail on Windows") def test_subprocess_error(): """error in mp.Process doesn't crash""" with new_kernel() as kc: iopub = kc.iopub_channel code = '\n'.join([ "import multiprocessing as mp", "p = mp.Process(target=int, args=('hi',))", "p.start()", "p.join()", ]) msg_id, content = execute(kc=kc, code=code) stdout, stderr = assemble_output(iopub) assert stdout == '' assert "ValueError" in stderr _check_master(kc, expected=True) _check_master(kc, expected=True, stream="stderr") # raw_input tests def test_raw_input(): """test [raw_]input""" with kernel() as kc: iopub = kc.iopub_channel input_f = "input" if py3compat.PY3 else "raw_input" theprompt = "prompt> " code = 'print({input_f}("{theprompt}"))'.format(**locals()) msg_id = kc.execute(code, allow_stdin=True) msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT) assert msg['header']['msg_type'] == u'input_request' content = msg['content'] assert content['prompt'] == theprompt text = "some text" kc.input(text) reply = kc.get_shell_msg(block=True, timeout=TIMEOUT) assert reply['content']['status'] == 'ok' stdout, stderr = assemble_output(iopub) assert stdout == text + "\n" @dec.skipif(py3compat.PY3) def test_eval_input(): """test input() on Python 2""" with kernel() as kc: iopub = kc.iopub_channel input_f = "input" if py3compat.PY3 else "raw_input" theprompt = "prompt> " code = 'print(input("{theprompt}"))'.format(**locals()) msg_id = kc.execute(code, allow_stdin=True) msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT) assert msg['header']['msg_type'] == u'input_request' content = msg['content'] assert content['prompt'] == theprompt kc.input("1+1") reply = kc.get_shell_msg(block=True, timeout=TIMEOUT) assert reply['content']['status'] == 'ok' stdout, stderr = assemble_output(iopub) assert stdout == "2\n" def test_save_history(): # Saving history from the kernel with %hist -f was failing because of # unicode problems on Python 2. with kernel() as kc, TemporaryDirectory() as td: file = os.path.join(td, 'hist.out') execute(u'a=1', kc=kc) wait_for_idle(kc) execute(u'b=u"abcþ"', kc=kc) wait_for_idle(kc) _, reply = execute("%hist -f " + file, kc=kc) assert reply['status'] == 'ok' with io.open(file, encoding='utf-8') as f: content = f.read() assert u'a=1' in content assert u'b=u"abcþ"' in content @dec.skip_without('faulthandler') def test_smoke_faulthandler(): with kernel() as kc: # Note: faulthandler.register is not available on windows. code = u'\n'.join([ 'import sys', 'import faulthandler', 'import signal', 'faulthandler.enable()', 'if not sys.platform.startswith("win32"):', ' faulthandler.register(signal.SIGTERM)']) _, reply = execute(code, kc=kc) nt.assert_equal(reply['status'], 'ok', reply.get('traceback', '')) def test_help_output(): """ipython kernel --help-all works""" tt.help_all_output_test('kernel') def test_is_complete(): with kernel() as kc: # There are more test cases for this in core - here we just check # that the kernel exposes the interface correctly. kc.is_complete('2+2') reply = kc.get_shell_msg(block=True, timeout=TIMEOUT) assert reply['content']['status'] == 'complete' # SyntaxError kc.is_complete('raise = 2') reply = kc.get_shell_msg(block=True, timeout=TIMEOUT) assert reply['content']['status'] == 'invalid' kc.is_complete('a = [1,\n2,') reply = kc.get_shell_msg(block=True, timeout=TIMEOUT) assert reply['content']['status'] == 'incomplete' assert reply['content']['indent'] == '' # Cell magic ends on two blank lines for console UIs kc.is_complete('%%timeit\na\n\n') reply = kc.get_shell_msg(block=True, timeout=TIMEOUT) assert reply['content']['status'] == 'complete' def test_complete(): with kernel() as kc: execute(u'a = 1', kc=kc) wait_for_idle(kc) cell = 'import IPython\nb = a.' kc.complete(cell) reply = kc.get_shell_msg(block=True, timeout=TIMEOUT) c = reply['content'] assert c['status'] == 'ok' start = cell.find('a.') end = start + 2 assert c['cursor_end'] == cell.find('a.') + 2 assert c['cursor_start'] <= end # there are many right answers for cursor_start, # so verify application of the completion # rather than the value of cursor_start matches = c['matches'] assert matches for m in matches: completed = cell[:c['cursor_start']] + m assert completed.startswith(cell) @dec.skip_without('matplotlib') def test_matplotlib_inline_on_import(): with kernel() as kc: cell = '\n'.join([ 'import matplotlib, matplotlib.pyplot as plt', 'backend = matplotlib.get_backend()' ]) _, reply = execute(cell, user_expressions={'backend': 'backend'}, kc=kc) _check_status(reply) backend_bundle = reply['user_expressions']['backend'] _check_status(backend_bundle) assert 'backend_inline' in backend_bundle['data']['text/plain'] def test_message_order(): N = 100 # number of messages to test with kernel() as kc: _, reply = execute("a = 1", kc=kc) _check_status(reply) offset = reply['execution_count'] + 1 cell = "a += 1\na" msg_ids = [] # submit N executions as fast as we can for i in range(N): msg_ids.append(kc.execute(cell)) # check message-handling order for i, msg_id in enumerate(msg_ids, offset): reply = kc.get_shell_msg(timeout=TIMEOUT) _check_status(reply['content']) assert reply['content']['execution_count'] == i assert reply['parent_header']['msg_id'] == msg_id def test_shutdown(): """Kernel exits after polite shutdown_request""" with new_kernel() as kc: km = kc.parent execute(u'a = 1', kc=kc) wait_for_idle(kc) kc.shutdown() for i in range(300): # 30s timeout if km.is_alive(): time.sleep(.1) else: break assert not km.is_alive() ipykernel-5.2.0/ipykernel/tests/test_kernelspec.py000066400000000000000000000072031363550014400224330ustar00rootroot00000000000000# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json import io import os import shutil import sys import tempfile try: from unittest import mock except ImportError: import mock # py2 from jupyter_core.paths import jupyter_data_dir from ipykernel.kernelspec import ( make_ipkernel_cmd, get_kernel_dict, write_kernel_spec, install, InstallIPythonKernelSpecApp, KERNEL_NAME, RESOURCES, ) import nose.tools as nt pjoin = os.path.join def test_make_ipkernel_cmd(): cmd = make_ipkernel_cmd() nt.assert_equal(cmd, [ sys.executable, '-m', 'ipykernel_launcher', '-f', '{connection_file}' ]) def assert_kernel_dict(d): assert d['argv'] == make_ipkernel_cmd() assert d['display_name'] == 'Python %i' % sys.version_info[0] assert d['language'] == 'python' def test_get_kernel_dict(): d = get_kernel_dict() assert_kernel_dict(d) def assert_kernel_dict_with_profile(d): nt.assert_equal(d['argv'], make_ipkernel_cmd( extra_arguments=["--profile", "test"])) assert d['display_name'] == 'Python %i' % sys.version_info[0] assert d['language'] == 'python' def test_get_kernel_dict_with_profile(): d = get_kernel_dict(["--profile", "test"]) assert_kernel_dict_with_profile(d) def assert_is_spec(path): for fname in os.listdir(RESOURCES): dst = pjoin(path, fname) assert os.path.exists(dst) kernel_json = pjoin(path, 'kernel.json') assert os.path.exists(kernel_json) with io.open(kernel_json, encoding='utf8') as f: json.load(f) def test_write_kernel_spec(): path = write_kernel_spec() assert_is_spec(path) shutil.rmtree(path) def test_write_kernel_spec_path(): path = os.path.join(tempfile.mkdtemp(), KERNEL_NAME) path2 = write_kernel_spec(path) assert path == path2 assert_is_spec(path) shutil.rmtree(path) def test_install_kernelspec(): path = tempfile.mkdtemp() try: test = InstallIPythonKernelSpecApp.launch_instance(argv=['--prefix', path]) assert_is_spec(os.path.join( path, 'share', 'jupyter', 'kernels', KERNEL_NAME)) finally: shutil.rmtree(path) def test_install_user(): tmp = tempfile.mkdtemp() with mock.patch.dict(os.environ, {'HOME': tmp}): install(user=True) data_dir = jupyter_data_dir() assert_is_spec(os.path.join(data_dir, 'kernels', KERNEL_NAME)) def test_install(): system_jupyter_dir = tempfile.mkdtemp() with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH', [system_jupyter_dir]): install() assert_is_spec(os.path.join(system_jupyter_dir, 'kernels', KERNEL_NAME)) def test_install_profile(): system_jupyter_dir = tempfile.mkdtemp() with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH', [system_jupyter_dir]): install(profile="Test") spec = os.path.join(system_jupyter_dir, 'kernels', KERNEL_NAME, "kernel.json") with open(spec) as f: spec = json.load(f) assert spec["display_name"].endswith(" [profile=Test]") nt.assert_equal(spec["argv"][-2:], ["--profile", "Test"]) def test_install_display_name_overrides_profile(): system_jupyter_dir = tempfile.mkdtemp() with mock.patch('jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH', [system_jupyter_dir]): install(display_name="Display", profile="Test") spec = os.path.join(system_jupyter_dir, 'kernels', KERNEL_NAME, "kernel.json") with open(spec) as f: spec = json.load(f) assert spec["display_name"] == "Display" ipykernel-5.2.0/ipykernel/tests/test_message_spec.py000066400000000000000000000353651363550014400227500ustar00rootroot00000000000000"""Test suite for our zeromq-based message specification.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import re import sys from distutils.version import LooseVersion as V try: from queue import Empty # Py 3 except ImportError: from Queue import Empty # Py 2 import nose.tools as nt from nose.plugins.skip import SkipTest from traitlets import ( HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum ) from ipython_genutils.py3compat import string_types, iteritems from .utils import TIMEOUT, start_global_kernel, flush_channels, execute #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- KC = None def setup(): global KC KC = start_global_kernel() #----------------------------------------------------------------------------- # Message Spec References #----------------------------------------------------------------------------- class Reference(HasTraits): """ Base class for message spec specification testing. This class is the core of the message specification test. The idea is that child classes implement trait attributes for each message keys, so that message keys can be tested against these traits using :meth:`check` method. """ def check(self, d): """validate a dict against our traits""" for key in self.trait_names(): assert key in d # FIXME: always allow None, probably not a good idea if d[key] is None: continue try: setattr(self, key, d[key]) except TraitError as e: assert False, str(e) class Version(Unicode): def __init__(self, *args, **kwargs): self.min = kwargs.pop('min', None) self.max = kwargs.pop('max', None) kwargs['default_value'] = self.min super(Version, self).__init__(*args, **kwargs) def validate(self, obj, value): if self.min and V(value) < V(self.min): raise TraitError("bad version: %s < %s" % (value, self.min)) if self.max and (V(value) > V(self.max)): raise TraitError("bad version: %s > %s" % (value, self.max)) class RMessage(Reference): msg_id = Unicode() msg_type = Unicode() header = Dict() parent_header = Dict() content = Dict() def check(self, d): super(RMessage, self).check(d) RHeader().check(self.header) if self.parent_header: RHeader().check(self.parent_header) class RHeader(Reference): msg_id = Unicode() msg_type = Unicode() session = Unicode() username = Unicode() version = Version(min='5.0') mime_pat = re.compile(r'^[\w\-\+\.]+/[\w\-\+\.]+$') class MimeBundle(Reference): metadata = Dict() data = Dict() def _data_changed(self, name, old, new): for k,v in iteritems(new): assert mime_pat.match(k) assert isinstance(v, string_types) # shell replies class Reply(Reference): status = Enum((u'ok', u'error'), default_value=u'ok') class ExecuteReply(Reply): execution_count = Integer() def check(self, d): Reference.check(self, d) if d['status'] == 'ok': ExecuteReplyOkay().check(d) elif d['status'] == 'error': ExecuteReplyError().check(d) class ExecuteReplyOkay(Reply): status = Enum(('ok',)) user_expressions = Dict() class ExecuteReplyError(Reply): ename = Unicode() evalue = Unicode() traceback = List(Unicode()) class InspectReply(Reply, MimeBundle): found = Bool() class ArgSpec(Reference): args = List(Unicode()) varargs = Unicode() varkw = Unicode() defaults = List() class Status(Reference): execution_state = Enum((u'busy', u'idle', u'starting'), default_value=u'busy') class CompleteReply(Reply): matches = List(Unicode()) cursor_start = Integer() cursor_end = Integer() status = Unicode() class LanguageInfo(Reference): name = Unicode('python') version = Unicode(sys.version.split()[0]) class KernelInfoReply(Reply): protocol_version = Version(min='5.0') implementation = Unicode('ipython') implementation_version = Version(min='2.1') language_info = Dict() banner = Unicode() def check(self, d): Reference.check(self, d) LanguageInfo().check(d['language_info']) class ConnectReply(Reference): shell_port = Integer() control_port = Integer() stdin_port = Integer() iopub_port = Integer() hb_port = Integer() class CommInfoReply(Reply): comms = Dict() class IsCompleteReply(Reference): status = Enum((u'complete', u'incomplete', u'invalid', u'unknown'), default_value=u'complete') def check(self, d): Reference.check(self, d) if d['status'] == 'incomplete': IsCompleteReplyIncomplete().check(d) class IsCompleteReplyIncomplete(Reference): indent = Unicode() # IOPub messages class ExecuteInput(Reference): code = Unicode() execution_count = Integer() class Error(ExecuteReplyError): """Errors are the same as ExecuteReply, but without status""" status = None # no status field class Stream(Reference): name = Enum((u'stdout', u'stderr'), default_value=u'stdout') text = Unicode() class DisplayData(MimeBundle): pass class ExecuteResult(MimeBundle): execution_count = Integer() class HistoryReply(Reply): history = List(List()) references = { 'execute_reply' : ExecuteReply(), 'inspect_reply' : InspectReply(), 'status' : Status(), 'complete_reply' : CompleteReply(), 'kernel_info_reply': KernelInfoReply(), 'connect_reply': ConnectReply(), 'comm_info_reply': CommInfoReply(), 'is_complete_reply': IsCompleteReply(), 'execute_input' : ExecuteInput(), 'execute_result' : ExecuteResult(), 'history_reply' : HistoryReply(), 'error' : Error(), 'stream' : Stream(), 'display_data' : DisplayData(), 'header' : RHeader(), } """ Specifications of `content` part of the reply messages. """ def validate_message(msg, msg_type=None, parent=None): """validate a message This is a generator, and must be iterated through to actually trigger each test. If msg_type and/or parent are given, the msg_type and/or parent msg_id are compared with the given values. """ RMessage().check(msg) if msg_type: assert msg['msg_type'] == msg_type if parent: assert msg['parent_header']['msg_id'] == parent content = msg['content'] ref = references[msg['msg_type']] ref.check(content) #----------------------------------------------------------------------------- # Tests #----------------------------------------------------------------------------- # Shell channel def test_execute(): flush_channels() msg_id = KC.execute(code='x=1') reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'execute_reply', msg_id) def test_execute_silent(): flush_channels() msg_id, reply = execute(code='x=1', silent=True) # flush status=idle status = KC.iopub_channel.get_msg(timeout=TIMEOUT) validate_message(status, 'status', msg_id) assert status['content']['execution_state'] == 'idle' nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1) count = reply['execution_count'] msg_id, reply = execute(code='x=2', silent=True) # flush status=idle status = KC.iopub_channel.get_msg(timeout=TIMEOUT) validate_message(status, 'status', msg_id) assert status['content']['execution_state'] == 'idle' nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1) count_2 = reply['execution_count'] assert count_2 == count def test_execute_error(): flush_channels() msg_id, reply = execute(code='1/0') assert reply['status'] == 'error' assert reply['ename'] == 'ZeroDivisionError' error = KC.iopub_channel.get_msg(timeout=TIMEOUT) validate_message(error, 'error', msg_id) def test_execute_inc(): """execute request should increment execution_count""" flush_channels() msg_id, reply = execute(code='x=1') count = reply['execution_count'] flush_channels() msg_id, reply = execute(code='x=2') count_2 = reply['execution_count'] assert count_2 == count+1 def test_execute_stop_on_error(): """execute request should not abort execution queue with stop_on_error False""" flush_channels() fail = '\n'.join([ # sleep to ensure subsequent message is waiting in the queue to be aborted 'import time', 'time.sleep(0.5)', 'raise ValueError', ]) KC.execute(code=fail) msg_id = KC.execute(code='print("Hello")') KC.get_shell_msg(timeout=TIMEOUT) reply = KC.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'aborted' flush_channels() KC.execute(code=fail, stop_on_error=False) msg_id = KC.execute(code='print("Hello")') KC.get_shell_msg(timeout=TIMEOUT) reply = KC.get_shell_msg(timeout=TIMEOUT) assert reply['content']['status'] == 'ok' def test_user_expressions(): flush_channels() msg_id, reply = execute(code='x=1', user_expressions=dict(foo='x+1')) user_expressions = reply['user_expressions'] nt.assert_equal(user_expressions, {u'foo': { u'status': u'ok', u'data': {u'text/plain': u'2'}, u'metadata': {}, }}) def test_user_expressions_fail(): flush_channels() msg_id, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname')) user_expressions = reply['user_expressions'] foo = user_expressions['foo'] assert foo['status'] == 'error' assert foo['ename'] == 'NameError' def test_oinfo(): flush_channels() msg_id = KC.inspect('a') reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'inspect_reply', msg_id) def test_oinfo_found(): flush_channels() msg_id, reply = execute(code='a=5') msg_id = KC.inspect('a') reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'inspect_reply', msg_id) content = reply['content'] assert content['found'] text = content['data']['text/plain'] assert 'Type:' in text assert 'Docstring:' in text def test_oinfo_detail(): flush_channels() msg_id, reply = execute(code='ip=get_ipython()') msg_id = KC.inspect('ip.object_inspect', cursor_pos=10, detail_level=1) reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'inspect_reply', msg_id) content = reply['content'] assert content['found'] text = content['data']['text/plain'] assert 'Signature:' in text assert 'Source:' in text def test_oinfo_not_found(): flush_channels() msg_id = KC.inspect('dne') reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'inspect_reply', msg_id) content = reply['content'] assert not content['found'] def test_complete(): flush_channels() msg_id, reply = execute(code="alpha = albert = 5") msg_id = KC.complete('al', 2) reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'complete_reply', msg_id) matches = reply['content']['matches'] for name in ('alpha', 'albert'): assert name in matches def test_kernel_info_request(): flush_channels() msg_id = KC.kernel_info() reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'kernel_info_reply', msg_id) def test_connect_request(): flush_channels() msg = KC.session.msg('connect_request') KC.shell_channel.send(msg) return msg['header']['msg_id'] msg_id = KC.kernel_info() reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'connect_reply', msg_id) def test_comm_info_request(): flush_channels() if not hasattr(KC, 'comm_info'): raise SkipTest() msg_id = KC.comm_info() reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'comm_info_reply', msg_id) def test_single_payload(): """ We want to test the set_next_input is not triggered several time per cell. This is (was ?) mostly due to the fact that `?` in a loop would trigger several set_next_input. I'm tempted to thing that we actually want to _allow_ multiple set_next_input (that's users' choice). But that `?` itself (and ?'s transform) should avoid setting multiple set_next_input). """ flush_channels() msg_id, reply = execute(code="ip = get_ipython()\n" "for i in range(3):\n" " ip.set_next_input('Hello There')\n") payload = reply['payload'] next_input_pls = [pl for pl in payload if pl["source"] == "set_next_input"] assert len(next_input_pls) == 1 def test_is_complete(): flush_channels() msg_id = KC.is_complete("a = 1") reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'is_complete_reply', msg_id) def test_history_range(): flush_channels() msg_id_exec = KC.execute(code='x=1', store_history = True) reply_exec = KC.get_shell_msg(timeout=TIMEOUT) msg_id = KC.history(hist_access_type = 'range', raw = True, output = True, start = 1, stop = 2, session = 0) reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'history_reply', msg_id) content = reply['content'] assert len(content['history']) == 1 def test_history_tail(): flush_channels() msg_id_exec = KC.execute(code='x=1', store_history = True) reply_exec = KC.get_shell_msg(timeout=TIMEOUT) msg_id = KC.history(hist_access_type = 'tail', raw = True, output = True, n = 1, session = 0) reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'history_reply', msg_id) content = reply['content'] assert len(content['history']) == 1 def test_history_search(): flush_channels() msg_id_exec = KC.execute(code='x=1', store_history = True) reply_exec = KC.get_shell_msg(timeout=TIMEOUT) msg_id = KC.history(hist_access_type = 'search', raw = True, output = True, n = 1, pattern = '*', session = 0) reply = KC.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'history_reply', msg_id) content = reply['content'] assert len(content['history']) == 1 # IOPub channel def test_stream(): flush_channels() msg_id, reply = execute("print('hi')") stdout = KC.iopub_channel.get_msg(timeout=TIMEOUT) validate_message(stdout, 'stream', msg_id) content = stdout['content'] assert content['text'] == u'hi\n' def test_display_data(): flush_channels() msg_id, reply = execute("from IPython.core.display import display; display(1)") display = KC.iopub_channel.get_msg(timeout=TIMEOUT) validate_message(display, 'display_data', parent=msg_id) data = display['content']['data'] assert data['text/plain'] == u'1' ipykernel-5.2.0/ipykernel/tests/test_pickleutil.py000066400000000000000000000022611363550014400224440ustar00rootroot00000000000000 import pickle from ipykernel.pickleutil import can, uncan, codeutil def interactive(f): f.__module__ = '__main__' return f def dumps(obj): return pickle.dumps(can(obj)) def loads(obj): return uncan(pickle.loads(obj)) def test_no_closure(): @interactive def foo(): a = 5 return a pfoo = dumps(foo) bar = loads(pfoo) assert foo() == bar() def test_generator_closure(): # this only creates a closure on Python 3 @interactive def foo(): i = 'i' r = [ i for j in (1,2) ] return r pfoo = dumps(foo) bar = loads(pfoo) assert foo() == bar() def test_nested_closure(): @interactive def foo(): i = 'i' def g(): return i return g() pfoo = dumps(foo) bar = loads(pfoo) assert foo() == bar() def test_closure(): i = 'i' @interactive def foo(): return i pfoo = dumps(foo) bar = loads(pfoo) assert foo() == bar() def test_uncan_bytes_buffer(): data = b'data' canned = can(data) canned.buffers = [memoryview(buf) for buf in canned.buffers] out = uncan(canned) assert out == data ipykernel-5.2.0/ipykernel/tests/test_serialize.py000066400000000000000000000127051363550014400222720ustar00rootroot00000000000000"""test serialization tools""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import pickle from collections import namedtuple from ipykernel.serialize import serialize_object, deserialize_object from IPython.testing import decorators as dec from ipykernel.pickleutil import CannedArray, CannedClass, interactive def roundtrip(obj): """roundtrip an object through serialization""" bufs = serialize_object(obj) obj2, remainder = deserialize_object(bufs) assert remainder == [] return obj2 SHAPES = ((100,), (1024,10), (10,8,6,5), (), (0,)) DTYPES = ('uint8', 'float64', 'int32', [('g', 'float32')], '|S10') def new_array(shape, dtype): import numpy return numpy.random.random(shape).astype(dtype) def test_roundtrip_simple(): for obj in [ 'hello', dict(a='b', b=10), [1,2,'hi'], (b'123', 'hello'), ]: obj2 = roundtrip(obj) assert obj == obj2 def test_roundtrip_nested(): for obj in [ dict(a=range(5), b={1:b'hello'}), [range(5),[range(3),(1,[b'whoda'])]], ]: obj2 = roundtrip(obj) assert obj == obj2 def test_roundtrip_buffered(): for obj in [ dict(a=b"x"*1025), b"hello"*500, [b"hello"*501, 1,2,3] ]: bufs = serialize_object(obj) assert len(bufs) == 2 obj2, remainder = deserialize_object(bufs) assert remainder == [] assert obj == obj2 def test_roundtrip_memoryview(): b = b'asdf' * 1025 view = memoryview(b) bufs = serialize_object(view) assert len(bufs) == 2 v2, remainder = deserialize_object(bufs) assert remainder == [] assert v2.tobytes() == b @dec.skip_without('numpy') def test_numpy(): import numpy from numpy.testing.utils import assert_array_equal for shape in SHAPES: for dtype in DTYPES: A = new_array(shape, dtype=dtype) bufs = serialize_object(A) bufs = [memoryview(b) for b in bufs] B, r = deserialize_object(bufs) assert r == [] assert A.shape == B.shape assert A.dtype == B.dtype assert_array_equal(A,B) @dec.skip_without('numpy') def test_recarray(): import numpy from numpy.testing.utils import assert_array_equal for shape in SHAPES: for dtype in [ [('f', float), ('s', '|S10')], [('n', int), ('s', '|S1'), ('u', 'uint32')], ]: A = new_array(shape, dtype=dtype) bufs = serialize_object(A) B, r = deserialize_object(bufs) assert r == [] assert A.shape == B.shape assert A.dtype == B.dtype assert_array_equal(A,B) @dec.skip_without('numpy') def test_numpy_in_seq(): import numpy from numpy.testing.utils import assert_array_equal for shape in SHAPES: for dtype in DTYPES: A = new_array(shape, dtype=dtype) bufs = serialize_object((A,1,2,b'hello')) canned = pickle.loads(bufs[0]) assert isinstance(canned[0], CannedArray) tup, r = deserialize_object(bufs) B = tup[0] assert r == [] assert A.shape == B.shape assert A.dtype == B.dtype assert_array_equal(A,B) @dec.skip_without('numpy') def test_numpy_in_dict(): import numpy from numpy.testing.utils import assert_array_equal for shape in SHAPES: for dtype in DTYPES: A = new_array(shape, dtype=dtype) bufs = serialize_object(dict(a=A,b=1,c=range(20))) canned = pickle.loads(bufs[0]) assert isinstance(canned['a'], CannedArray) d, r = deserialize_object(bufs) B = d['a'] assert r == [] assert A.shape == B.shape assert A.dtype == B.dtype assert_array_equal(A,B) def test_class(): @interactive class C(object): a=5 bufs = serialize_object(dict(C=C)) canned = pickle.loads(bufs[0]) assert isinstance(canned['C'], CannedClass) d, r = deserialize_object(bufs) C2 = d['C'] assert C2.a == C.a def test_class_oldstyle(): @interactive class C: a=5 bufs = serialize_object(dict(C=C)) canned = pickle.loads(bufs[0]) assert isinstance(canned['C'], CannedClass) d, r = deserialize_object(bufs) C2 = d['C'] assert C2.a == C.a def test_tuple(): tup = (lambda x:x, 1) bufs = serialize_object(tup) canned = pickle.loads(bufs[0]) assert isinstance(canned, tuple) t2, r = deserialize_object(bufs) assert t2[0](t2[1]) == tup[0](tup[1]) point = namedtuple('point', 'x y') def test_namedtuple(): p = point(1,2) bufs = serialize_object(p) canned = pickle.loads(bufs[0]) assert isinstance(canned, point) p2, r = deserialize_object(bufs, globals()) assert p2.x == p.x assert p2.y == p.y def test_list(): lis = [lambda x:x, 1] bufs = serialize_object(lis) canned = pickle.loads(bufs[0]) assert isinstance(canned, list) l2, r = deserialize_object(bufs) assert l2[0](l2[1]) == lis[0](lis[1]) def test_class_inheritance(): @interactive class C(object): a=5 @interactive class D(C): b=10 bufs = serialize_object(dict(D=D)) canned = pickle.loads(bufs[0]) assert isinstance(canned['D'], CannedClass) d, r = deserialize_object(bufs) D2 = d['D'] assert D2.a == D.a assert D2.b == D.b ipykernel-5.2.0/ipykernel/tests/test_start_kernel.py000066400000000000000000000034601363550014400227760ustar00rootroot00000000000000from .test_embed_kernel import setup_kernel from flaky import flaky TIMEOUT = 15 @flaky(max_runs=3) def test_ipython_start_kernel_userns(): cmd = ('from IPython import start_kernel\n' 'ns = {"tre": 123}\n' 'start_kernel(user_ns=ns)') with setup_kernel(cmd) as client: msg_id = client.inspect('tre') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert u'123' in text # user_module should be an instance of DummyMod msg_id = client.execute("usermod = get_ipython().user_module") msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['status'] == u'ok' msg_id = client.inspect('usermod') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert u'DummyMod' in text @flaky(max_runs=3) def test_ipython_start_kernel_no_userns(): # Issue #4188 - user_ns should be passed to shell as None, not {} cmd = ('from IPython import start_kernel\n' 'start_kernel()') with setup_kernel(cmd) as client: # user_module should not be an instance of DummyMod msg_id = client.execute("usermod = get_ipython().user_module") msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['status'] == u'ok' msg_id = client.inspect('usermod') msg = client.get_shell_msg(block=True, timeout=TIMEOUT) content = msg['content'] assert content['found'] text = content['data']['text/plain'] assert u'DummyMod' not in text ipykernel-5.2.0/ipykernel/tests/test_zmq_shell.py000066400000000000000000000134011363550014400222730ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Tests for zmq shell / display publisher. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import os try: from queue import Queue except ImportError: # py2 from Queue import Queue from threading import Thread import unittest from traitlets import Int import zmq from ipykernel.zmqshell import ZMQDisplayPublisher from jupyter_client.session import Session class NoReturnDisplayHook(object): """ A dummy DisplayHook which allows us to monitor the number of times an object is called, but which does *not* return a message when it is called. """ call_count = 0 def __call__(self, obj): self.call_count += 1 class ReturnDisplayHook(NoReturnDisplayHook): """ A dummy DisplayHook with the same counting ability as its base class, but which also returns the same message when it is called. """ def __call__(self, obj): super(ReturnDisplayHook, self).__call__(obj) return obj class CounterSession(Session): """ This is a simple subclass to allow us to count the calls made to the session object by the display publisher. """ send_count = Int(0) def send(self, *args, **kwargs): """ A trivial override to just augment the existing call with an increment to the send counter. """ self.send_count += 1 super(CounterSession, self).send(*args, **kwargs) class ZMQDisplayPublisherTests(unittest.TestCase): """ Tests the ZMQDisplayPublisher in zmqshell.py """ def setUp(self): self.context = zmq.Context() self.socket = self.context.socket(zmq.PUB) self.session = CounterSession() self.disp_pub = ZMQDisplayPublisher( session = self.session, pub_socket = self.socket ) def tearDown(self): """ We need to close the socket in order to proceed with the tests. TODO - There is still an open file handler to '/dev/null', presumably created by zmq. """ self.disp_pub.clear_output() self.socket.close() self.context.term() def test_display_publisher_creation(self): """ Since there's no explicit constructor, here we confirm that keyword args get assigned correctly, and override the defaults. """ assert self.disp_pub.session == self.session assert self.disp_pub.pub_socket == self.socket def test_thread_local_hooks(self): """ Confirms that the thread_local attribute is correctly initialised with an empty list for the display hooks """ assert self.disp_pub._hooks == [] def hook(msg): return msg self.disp_pub.register_hook(hook) assert self.disp_pub._hooks == [hook] q = Queue() def set_thread_hooks(): q.put(self.disp_pub._hooks) t = Thread(target=set_thread_hooks) t.start() thread_hooks = q.get(timeout=10) assert thread_hooks == [] def test_publish(self): """ Publish should prepare the message and eventually call `send` by default. """ data = dict(a = 1) assert self.session.send_count == 0 self.disp_pub.publish(data) assert self.session.send_count == 1 def test_display_hook_halts_send(self): """ If a hook is installed, and on calling the object it does *not* return a message, then we assume that the message has been consumed, and should not be processed (`sent`) in the normal manner. """ data = dict(a = 1) hook = NoReturnDisplayHook() self.disp_pub.register_hook(hook) assert hook.call_count == 0 assert self.session.send_count == 0 self.disp_pub.publish(data) assert hook.call_count == 1 assert self.session.send_count == 0 def test_display_hook_return_calls_send(self): """ If a hook is installed and on calling the object it returns a new message, then we assume that this is just a message transformation, and the message should be sent in the usual manner. """ data = dict(a=1) hook = ReturnDisplayHook() self.disp_pub.register_hook(hook) assert hook.call_count == 0 assert self.session.send_count == 0 self.disp_pub.publish(data) assert hook.call_count == 1 assert self.session.send_count == 1 def test_unregister_hook(self): """ Once a hook is unregistered, it should not be called during `publish`. """ data = dict(a = 1) hook = NoReturnDisplayHook() self.disp_pub.register_hook(hook) assert hook.call_count == 0 assert self.session.send_count == 0 self.disp_pub.publish(data) assert hook.call_count == 1 assert self.session.send_count == 0 # # After unregistering the `NoReturn` hook, any calls # to publish should *not* got through the DisplayHook, # but should instead hit the usual `session.send` call # at the end. # # As a result, the hook call count should *not* increase, # but the session send count *should* increase. # first = self.disp_pub.unregister_hook(hook) self.disp_pub.publish(data) self.assertTrue(first) assert hook.call_count == 1 assert self.session.send_count == 1 # # If a hook is not installed, `unregister_hook` # should return false. # second = self.disp_pub.unregister_hook(hook) self.assertFalse(second) if __name__ == '__main__': unittest.main() ipykernel-5.2.0/ipykernel/tests/utils.py000066400000000000000000000112211363550014400203740ustar00rootroot00000000000000"""utilities for testing IPython kernels""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import atexit import os import sys from contextlib import contextmanager from subprocess import PIPE, STDOUT try: from queue import Empty # Py 3 except ImportError: from Queue import Empty # Py 2 import nose from jupyter_client import manager STARTUP_TIMEOUT = 60 TIMEOUT = 15 KM = None KC = None def start_new_kernel(**kwargs): """start a new kernel, and return its Manager and Client Integrates with our output capturing for tests. """ kwargs['stderr'] = STDOUT try: kwargs['stdout'] = nose.iptest_stdstreams_fileno() except AttributeError: pass return manager.start_new_kernel(startup_timeout=STARTUP_TIMEOUT, **kwargs) def flush_channels(kc=None): """flush any messages waiting on the queue""" from .test_message_spec import validate_message if kc is None: kc = KC for channel in (kc.shell_channel, kc.iopub_channel): while True: try: msg = channel.get_msg(block=True, timeout=0.1) except Empty: break else: validate_message(msg) def execute(code='', kc=None, **kwargs): """wrapper for doing common steps for validating an execution request""" from .test_message_spec import validate_message if kc is None: kc = KC msg_id = kc.execute(code=code, **kwargs) reply = kc.get_shell_msg(timeout=TIMEOUT) validate_message(reply, 'execute_reply', msg_id) busy = kc.get_iopub_msg(timeout=TIMEOUT) validate_message(busy, 'status', msg_id) assert busy['content']['execution_state'] == 'busy' if not kwargs.get('silent'): execute_input = kc.get_iopub_msg(timeout=TIMEOUT) validate_message(execute_input, 'execute_input', msg_id) assert execute_input['content']['code'] == code # show tracebacks if present for debugging if reply['content'].get('traceback'): print('\n'.join(reply['content']['traceback']), file=sys.stderr) return msg_id, reply['content'] def start_global_kernel(): """start the global kernel (if it isn't running) and return its client""" global KM, KC if KM is None: KM, KC = start_new_kernel() atexit.register(stop_global_kernel) else: flush_channels(KC) return KC @contextmanager def kernel(): """Context manager for the global kernel instance Should be used for most kernel tests Returns ------- kernel_client: connected KernelClient instance """ yield start_global_kernel() def uses_kernel(test_f): """Decorator for tests that use the global kernel""" def wrapped_test(): with kernel() as kc: test_f(kc) wrapped_test.__doc__ = test_f.__doc__ wrapped_test.__name__ = test_f.__name__ return wrapped_test def stop_global_kernel(): """Stop the global shared kernel instance, if it exists""" global KM, KC KC.stop_channels() KC = None if KM is None: return KM.shutdown_kernel(now=True) KM = None def new_kernel(argv=None): """Context manager for a new kernel in a subprocess Should only be used for tests where the kernel must not be re-used. Returns ------- kernel_client: connected KernelClient instance """ kwargs = {'stderr': STDOUT} try: kwargs['stdout'] = nose.iptest_stdstreams_fileno() except AttributeError: pass if argv is not None: kwargs['extra_arguments'] = argv return manager.run_kernel(**kwargs) def assemble_output(iopub): """assemble stdout/err from an execution""" stdout = '' stderr = '' while True: msg = iopub.get_msg(block=True, timeout=1) msg_type = msg['msg_type'] content = msg['content'] if msg_type == 'status' and content['execution_state'] == 'idle': # idle message signals end of output break elif msg['msg_type'] == 'stream': if content['name'] == 'stdout': stdout += content['text'] elif content['name'] == 'stderr': stderr += content['text'] else: raise KeyError("bad stream: %r" % content['name']) else: # other output, ignored pass return stdout, stderr def wait_for_idle(kc): while True: msg = kc.iopub_channel.get_msg(block=True, timeout=1) msg_type = msg['msg_type'] content = msg['content'] if msg_type == 'status' and content['execution_state'] == 'idle': break ipykernel-5.2.0/ipykernel/zmqshell.py000066400000000000000000000522631363550014400177440ustar00rootroot00000000000000# -*- coding: utf-8 -*- """A ZMQ-based subclass of InteractiveShell. This code is meant to ease the refactoring of the base InteractiveShell into something with a cleaner architecture for 2-process use, without actually breaking InteractiveShell itself. So we're doing something a bit ugly, where we subclass and override what we want to fix. Once this is working well, we can go back to the base class and refactor the code for a cleaner inheritance implementation that doesn't rely on so much monkeypatching. But this lets us maintain a fully working IPython as we develop the new machinery. This should thus be thought of as scaffolding. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import os import sys import time import warnings from threading import local from tornado import ioloop from IPython.core.interactiveshell import ( InteractiveShell, InteractiveShellABC ) from IPython.core import page from IPython.core.autocall import ZMQExitAutocall from IPython.core.displaypub import DisplayPublisher from IPython.core.error import UsageError from IPython.core.magics import MacroToEdit, CodeMagics from IPython.core.magic import magics_class, line_magic, Magics from IPython.core import payloadpage from IPython.core.usage import default_banner from IPython.display import display, Javascript from ipykernel import ( get_connection_file, get_connection_info, connect_qtconsole ) from IPython.utils import openpy from ipykernel.jsonutil import json_clean, encode_images from IPython.utils.process import arg_split from ipython_genutils import py3compat from ipython_genutils.py3compat import unicode_type from traitlets import ( Instance, Type, Dict, CBool, CBytes, Any, default, observe ) from ipykernel.displayhook import ZMQShellDisplayHook from jupyter_core.paths import jupyter_runtime_dir from jupyter_client.session import extract_header, Session #----------------------------------------------------------------------------- # Functions and classes #----------------------------------------------------------------------------- class ZMQDisplayPublisher(DisplayPublisher): """A display publisher that publishes data using a ZeroMQ PUB socket.""" session = Instance(Session, allow_none=True) pub_socket = Any(allow_none=True) parent_header = Dict({}) topic = CBytes(b'display_data') # thread_local: # An attribute used to ensure the correct output message # is processed. See ipykernel Issue 113 for a discussion. _thread_local = Any() def set_parent(self, parent): """Set the parent for outbound messages.""" self.parent_header = extract_header(parent) def _flush_streams(self): """flush IO Streams prior to display""" sys.stdout.flush() sys.stderr.flush() @default('_thread_local') def _default_thread_local(self): """Initialize our thread local storage""" return local() @property def _hooks(self): if not hasattr(self._thread_local, 'hooks'): # create new list for a new thread self._thread_local.hooks = [] return self._thread_local.hooks def publish(self, data, metadata=None, source=None, transient=None, update=False, ): """Publish a display-data message Parameters ---------- data: dict A mime-bundle dict, keyed by mime-type. metadata: dict, optional Metadata associated with the data. transient: dict, optional, keyword-only Transient data that may only be relevant during a live display, such as display_id. Transient data should not be persisted to documents. update: bool, optional, keyword-only If True, send an update_display_data message instead of display_data. """ self._flush_streams() if metadata is None: metadata = {} if transient is None: transient = {} self._validate_data(data, metadata) content = {} content['data'] = encode_images(data) content['metadata'] = metadata content['transient'] = transient msg_type = 'update_display_data' if update else 'display_data' # Use 2-stage process to send a message, # in order to put it through the transform # hooks before potentially sending. msg = self.session.msg( msg_type, json_clean(content), parent=self.parent_header ) # Each transform either returns a new # message or None. If None is returned, # the message has been 'used' and we return. for hook in self._hooks: msg = hook(msg) if msg is None: return self.session.send( self.pub_socket, msg, ident=self.topic, ) def clear_output(self, wait=False): """Clear output associated with the current execution (cell). Parameters ---------- wait: bool (default: False) If True, the output will not be cleared immediately, instead waiting for the next display before clearing. This reduces bounce during repeated clear & display loops. """ content = dict(wait=wait) self._flush_streams() self.session.send( self.pub_socket, u'clear_output', content, parent=self.parent_header, ident=self.topic, ) def register_hook(self, hook): """ Registers a hook with the thread-local storage. Parameters ---------- hook : Any callable object Returns ------- Either a publishable message, or `None`. The DisplayHook objects must return a message from the __call__ method if they still require the `session.send` method to be called after transformation. Returning `None` will halt that execution path, and session.send will not be called. """ self._hooks.append(hook) def unregister_hook(self, hook): """ Un-registers a hook with the thread-local storage. Parameters ---------- hook: Any callable object which has previously been registered as a hook. Returns ------- bool - `True` if the hook was removed, `False` if it wasn't found. """ try: self._hooks.remove(hook) return True except ValueError: return False @magics_class class KernelMagics(Magics): #------------------------------------------------------------------------ # Magic overrides #------------------------------------------------------------------------ # Once the base class stops inheriting from magic, this code needs to be # moved into a separate machinery as well. For now, at least isolate here # the magics which this class needs to implement differently from the base # class, or that are unique to it. _find_edit_target = CodeMagics._find_edit_target @line_magic def edit(self, parameter_s='', last_call=['','']): """Bring up an editor and execute the resulting code. Usage: %edit [options] [args] %edit runs an external text editor. You will need to set the command for this editor via the ``TerminalInteractiveShell.editor`` option in your configuration file before it will work. This command allows you to conveniently edit multi-line code right in your IPython session. If called without arguments, %edit opens up an empty editor with a temporary file and will execute the contents of this file when you close it (don't forget to save it!). Options: -n Open the editor at a specified line number. By default, the IPython editor hook uses the unix syntax 'editor +N filename', but you can configure this by providing your own modified hook if your favorite editor supports line-number specifications with a different syntax. -p Call the editor with the same data as the previous time it was used, regardless of how long ago (in your current session) it was. -r Use 'raw' input. This option only applies to input taken from the user's history. By default, the 'processed' history is used, so that magics are loaded in their transformed version to valid Python. If this option is given, the raw input as typed as the command line is used instead. When you exit the editor, it will be executed by IPython's own processor. Arguments: If arguments are given, the following possibilities exist: - The arguments are numbers or pairs of colon-separated numbers (like 1 4:8 9). These are interpreted as lines of previous input to be loaded into the editor. The syntax is the same of the %macro command. - If the argument doesn't start with a number, it is evaluated as a variable and its contents loaded into the editor. You can thus edit any string which contains python code (including the result of previous edits). - If the argument is the name of an object (other than a string), IPython will try to locate the file where it was defined and open the editor at the point where it is defined. You can use ``%edit function`` to load an editor exactly at the point where 'function' is defined, edit it and have the file be executed automatically. If the object is a macro (see %macro for details), this opens up your specified editor with a temporary file containing the macro's data. Upon exit, the macro is reloaded with the contents of the file. Note: opening at an exact line is only supported under Unix, and some editors (like kedit and gedit up to Gnome 2.8) do not understand the '+NUMBER' parameter necessary for this feature. Good editors like (X)Emacs, vi, jed, pico and joe all do. - If the argument is not found as a variable, IPython will look for a file with that name (adding .py if necessary) and load it into the editor. It will execute its contents with execfile() when you exit, loading any code in the file into your interactive namespace. Unlike in the terminal, this is designed to use a GUI editor, and we do not know when it has closed. So the file you edit will not be automatically executed or printed. Note that %edit is also available through the alias %ed. """ opts,args = self.parse_options(parameter_s, 'prn:') try: filename, lineno, _ = CodeMagics._find_edit_target(self.shell, args, opts, last_call) except MacroToEdit: # TODO: Implement macro editing over 2 processes. print("Macro editing not yet implemented in 2-process model.") return # Make sure we send to the client an absolute path, in case the working # directory of client and kernel don't match filename = os.path.abspath(filename) payload = { 'source' : 'edit_magic', 'filename' : filename, 'line_number' : lineno } self.shell.payload_manager.write_payload(payload) # A few magics that are adapted to the specifics of using pexpect and a # remote terminal @line_magic def clear(self, arg_s): """Clear the terminal.""" if os.name == 'posix': self.shell.system("clear") else: self.shell.system("cls") if os.name == 'nt': # This is the usual name in windows cls = line_magic('cls')(clear) # Terminal pagers won't work over pexpect, but we do have our own pager @line_magic def less(self, arg_s): """Show a file through the pager. Files ending in .py are syntax-highlighted.""" if not arg_s: raise UsageError('Missing filename.') if arg_s.endswith('.py'): cont = self.shell.pycolorize(openpy.read_py_file(arg_s, skip_encoding_cookie=False)) else: cont = open(arg_s).read() page.page(cont) more = line_magic('more')(less) # Man calls a pager, so we also need to redefine it if os.name == 'posix': @line_magic def man(self, arg_s): """Find the man page for the given command and display in pager.""" page.page(self.shell.getoutput('man %s | col -b' % arg_s, split=False)) @line_magic def connect_info(self, arg_s): """Print information for connecting other clients to this kernel It will print the contents of this session's connection file, as well as shortcuts for local clients. In the simplest case, when called from the most recently launched kernel, secondary clients can be connected, simply with: $> jupyter --existing """ try: connection_file = get_connection_file() info = get_connection_info(unpack=False) except Exception as e: warnings.warn("Could not get connection info: %r" % e) return # if it's in the default dir, truncate to basename if jupyter_runtime_dir() == os.path.dirname(connection_file): connection_file = os.path.basename(connection_file) print (info + '\n') print ("Paste the above JSON into a file, and connect with:\n" " $> jupyter --existing \n" "or, if you are local, you can connect with just:\n" " $> jupyter --existing {0}\n" "or even just:\n" " $> jupyter --existing\n" "if this is the most recent Jupyter kernel you have started.".format( connection_file ) ) @line_magic def qtconsole(self, arg_s): """Open a qtconsole connected to this kernel. Useful for connecting a qtconsole to running notebooks, for better debugging. """ # %qtconsole should imply bind_kernel for engines: # FIXME: move to ipyparallel Kernel subclass if 'ipyparallel' in sys.modules: from ipyparallel import bind_kernel bind_kernel() try: connect_qtconsole(argv=arg_split(arg_s, os.name=='posix')) except Exception as e: warnings.warn("Could not start qtconsole: %r" % e) return @line_magic def autosave(self, arg_s): """Set the autosave interval in the notebook (in seconds). The default value is 120, or two minutes. ``%autosave 0`` will disable autosave. This magic only has an effect when called from the notebook interface. It has no effect when called in a startup file. """ try: interval = int(arg_s) except ValueError: raise UsageError("%%autosave requires an integer, got %r" % arg_s) # javascript wants milliseconds milliseconds = 1000 * interval display(Javascript("IPython.notebook.set_autosave_interval(%i)" % milliseconds), include=['application/javascript'] ) if interval: print("Autosaving every %i seconds" % interval) else: print("Autosave disabled") class ZMQInteractiveShell(InteractiveShell): """A subclass of InteractiveShell for ZMQ.""" displayhook_class = Type(ZMQShellDisplayHook) display_pub_class = Type(ZMQDisplayPublisher) data_pub_class = Type('ipykernel.datapub.ZMQDataPublisher') kernel = Any() parent_header = Any() @default('banner1') def _default_banner1(self): return default_banner # Override the traitlet in the parent class, because there's no point using # readline for the kernel. Can be removed when the readline code is moved # to the terminal frontend. readline_use = CBool(False) # autoindent has no meaning in a zmqshell, and attempting to enable it # will print a warning in the absence of readline. autoindent = CBool(False) exiter = Instance(ZMQExitAutocall) @default('exiter') def _default_exiter(self): return ZMQExitAutocall(self) @observe('exit_now') def _update_exit_now(self, change): """stop eventloop when exit_now fires""" if change['new']: loop = self.kernel.io_loop loop.call_later(0.1, loop.stop) if self.kernel.eventloop: exit_hook = getattr(self.kernel.eventloop, 'exit_hook', None) if exit_hook: exit_hook(self.kernel) keepkernel_on_exit = None # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no # interactive input being read; we provide event loop support in ipkernel def enable_gui(self, gui): from .eventloops import enable_gui as real_enable_gui try: real_enable_gui(gui) self.active_eventloop = gui except ValueError as e: raise UsageError("%s" % e) def init_environment(self): """Configure the user's environment.""" env = os.environ # These two ensure 'ls' produces nice coloring on BSD-derived systems env['TERM'] = 'xterm-color' env['CLICOLOR'] = '1' # Since normal pagers don't work at all (over pexpect we don't have # single-key control of the subprocess), try to disable paging in # subprocesses as much as possible. env['PAGER'] = 'cat' env['GIT_PAGER'] = 'cat' def init_hooks(self): super(ZMQInteractiveShell, self).init_hooks() self.set_hook('show_in_pager', page.as_hook(payloadpage.page), 99) def init_data_pub(self): """Delay datapub init until request, for deprecation warnings""" pass @property def data_pub(self): if not hasattr(self, '_data_pub'): warnings.warn("InteractiveShell.data_pub is deprecated outside IPython parallel.", DeprecationWarning, stacklevel=2) self._data_pub = self.data_pub_class(parent=self) self._data_pub.session = self.display_pub.session self._data_pub.pub_socket = self.display_pub.pub_socket return self._data_pub @data_pub.setter def data_pub(self, pub): self._data_pub = pub def ask_exit(self): """Engage the exit actions.""" self.exit_now = (not self.keepkernel_on_exit) payload = dict( source='ask_exit', keepkernel=self.keepkernel_on_exit, ) self.payload_manager.write_payload(payload) def run_cell(self, *args, **kwargs): self._last_traceback = None return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs) def _showtraceback(self, etype, evalue, stb): # try to preserve ordering of tracebacks and print statements sys.stdout.flush() sys.stderr.flush() exc_content = { u'traceback' : stb, u'ename' : unicode_type(etype.__name__), u'evalue' : py3compat.safe_unicode(evalue), } dh = self.displayhook # Send exception info over pub socket for other clients than the caller # to pick up topic = None if dh.topic: topic = dh.topic.replace(b'execute_result', b'error') exc_msg = dh.session.send(dh.pub_socket, u'error', json_clean(exc_content), dh.parent_header, ident=topic) # FIXME - Once we rely on Python 3, the traceback is stored on the # exception object, so we shouldn't need to store it here. self._last_traceback = stb def set_next_input(self, text, replace=False): """Send the specified text to the frontend to be presented at the next input cell.""" payload = dict( source='set_next_input', text=text, replace=replace, ) self.payload_manager.write_payload(payload) def set_parent(self, parent): """Set the parent header for associating output with its triggering input""" self.parent_header = parent self.displayhook.set_parent(parent) self.display_pub.set_parent(parent) if hasattr(self, '_data_pub'): self.data_pub.set_parent(parent) try: sys.stdout.set_parent(parent) except AttributeError: pass try: sys.stderr.set_parent(parent) except AttributeError: pass def get_parent(self): return self.parent_header def init_magics(self): super(ZMQInteractiveShell, self).init_magics() self.register_magics(KernelMagics) self.magics_manager.register_alias('ed', 'edit') def init_virtualenv(self): # Overridden not to do virtualenv detection, because it's probably # not appropriate in a kernel. To use a kernel in a virtualenv, install # it inside the virtualenv. # https://ipython.readthedocs.io/en/latest/install/kernel_install.html pass InteractiveShellABC.register(ZMQInteractiveShell) ipykernel-5.2.0/ipykernel_launcher.py000066400000000000000000000007031363550014400177560ustar00rootroot00000000000000"""Entry point for launching an IPython kernel. This is separate from the ipykernel package so we can avoid doing imports until after removing the cwd from sys.path. """ import sys if __name__ == '__main__': # Remove the CWD from sys.path while we load stuff. # This is added back by InteractiveShellApp.init_path() if sys.path[0] == '': del sys.path[0] from ipykernel import kernelapp as app app.launch_new_instance() ipykernel-5.2.0/pyproject.toml000066400000000000000000000001621363550014400164340ustar00rootroot00000000000000[build-system] requires=[ "setuptools", "wheel", "ipython>=5", "jupyter_core>=4.2", "jupyter_client", ] ipykernel-5.2.0/readthedocs.yml000066400000000000000000000001241363550014400165260ustar00rootroot00000000000000python: version: 3.5 pip_install: true requirements_file: docs/requirements.txt ipykernel-5.2.0/setup.cfg000066400000000000000000000004061363550014400153420ustar00rootroot00000000000000[bdist_wheel] universal=0 [metadata] license_file = COPYING.md [nosetests] warningfilters= default |.* |DeprecationWarning |ipykernel.* error |.*invalid.* |DeprecationWarning |matplotlib.* ipykernel-5.2.0/setup.py000066400000000000000000000102371363550014400152360ustar00rootroot00000000000000#!/usr/bin/env python # coding: utf-8 # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function # the name of the package name = 'ipykernel' #----------------------------------------------------------------------------- # Minimal Python version sanity check #----------------------------------------------------------------------------- import sys import re v = sys.version_info if v[:2] < (3, 5): error = "ERROR: %s requires Python version 3.5 or above." % name print(error, file=sys.stderr) sys.exit(1) #----------------------------------------------------------------------------- # get on with it #----------------------------------------------------------------------------- from glob import glob import os import shutil from setuptools import setup from setuptools.command.bdist_egg import bdist_egg class bdist_egg_disabled(bdist_egg): """Disabled version of bdist_egg Prevents setup.py install from performing setuptools' default easy_install, which it should never ever do. """ def run(self): sys.exit("Aborting implicit building of eggs. Use `pip install .` to install from source.") pjoin = os.path.join here = os.path.abspath(os.path.dirname(__file__)) pkg_root = pjoin(here, name) packages = [] for d, _, _ in os.walk(pjoin(here, name)): if os.path.exists(pjoin(d, '__init__.py')): packages.append(d[len(here)+1:].replace(os.path.sep, '.')) package_data = { 'ipykernel': ['resources/*.*'], } version_ns = {} with open(pjoin(here, name, '_version.py')) as f: exec(f.read(), {}, version_ns) current_version = version_ns['__version__'] loose_pep440re = re.compile(r'^(\d+)\.(\d+)\.(\d+((a|b|rc)\d+)?)(\.post\d+)?(\.dev\d*)?$') if not loose_pep440re.match(current_version): raise ValueError("Version number '%s' is not valid (should match [N!]N(.N)*[{a|b|rc}N][.postN][.devN])" % current_version) setup_args = dict( name=name, version=current_version, cmdclass={ 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled, }, scripts=glob(pjoin('scripts', '*')), packages=packages, py_modules=['ipykernel_launcher'], package_data=package_data, description="IPython Kernel for Jupyter", author='IPython Development Team', author_email='ipython-dev@scipy.org', url='https://ipython.org', license='BSD', long_description="The IPython kernel for Jupyter", platforms="Linux, Mac OS X, Windows", keywords=['Interactive', 'Interpreter', 'Shell', 'Web'], python_requires='>=3.5', install_requires=[ 'ipython>=5.0.0', 'traitlets>=4.1.0', 'jupyter_client', 'tornado>=4.2', 'appnope;platform_system=="Darwin"', ], extras_require={ 'test': [ 'pytest !=5.3.4', 'pytest-cov', 'flaky', 'nose', # nose because there are still a few nose.tools imports hanging around ], }, classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 3', ], ) if any(a.startswith(('bdist', 'install')) for a in sys.argv): from ipykernel.kernelspec import write_kernel_spec, make_ipkernel_cmd, KERNEL_NAME # When building a wheel, the executable specified in the kernelspec is simply 'python'. if any(a.startswith('bdist') for a in sys.argv): argv = make_ipkernel_cmd(executable='python') # When installing from source, the full `sys.executable` can be used. if any(a.startswith('install') for a in sys.argv): argv = make_ipkernel_cmd() dest = os.path.join(here, 'data_kernelspec') if os.path.exists(dest): shutil.rmtree(dest) write_kernel_spec(dest, overrides={'argv': argv}) setup_args['data_files'] = [ ( pjoin('share', 'jupyter', 'kernels', KERNEL_NAME), glob(pjoin('data_kernelspec', '*')), ) ] if __name__ == '__main__': setup(**setup_args)